/*
 * Copyright (C) ST-Ericsson SA 2007-2010
 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan) (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE (1 << 31)
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0

/* Hardware designer of the block */
#define D40_HW_DESIGNER 0x8

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer, there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @is_hw_linked: true if this job will automatically be continued after
 * the previous one.
 *
 * This descriptor is used for both logical and physical transfers.
 */

struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_count;
	u32 lli_tx_len;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	bool is_in_client_list;
	bool is_hw_linked;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equal to the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void *base;
	void *base_unaligned;
	int pages;
	spinlock_t lock;
	u32 *alloc_map;
	int num_blocks;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	/* ID of the most recent completed transfer */
	int completed;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head active;
	struct list_head queue;
	struct stedma40_chan_cfg dma_cfg;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_lcla_elem lcla;
	struct d40_log_lli_full *lcpa;
	/* Runtime reconfiguration */
	dma_addr_t runtime_addr;
	enum dma_data_direction runtime_direction;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	u8 rev:4;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_phy_chans;
	int num_log_chans;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	/* Physical half channels */
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

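/*
 * Set up the descriptor's LLI storage: use the small pre_alloc_lli area for
 * a single src/dst pair, otherwise kmalloc an aligned area and point the
 * src/dst LLI lists (logical or physical) into it.
 */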
static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				break;
			}
	} else {
		d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
		if (d != NULL) {
			memset(d, 0, sizeof(struct d40_desc));
			INIT_LIST_HEAD(&d->node);
		}
	}
	return d;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;
	list_for_each_entry(d, &d40c->queue, node)
		if (list_is_last(&d->node, &d40c->queue))
			break;
	return d;
}

/* Support functions for logical channels */

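/*
 * Reserve one free src and one free dst LCLA entry (a bit each in alloc_map)
 * for this channel's physical channel and remember where in the LCLA area
 * they live.
 */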
static int d40_lcla_id_get(struct d40_chan *d40c)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;
	unsigned long flags;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (d40c->base->lcla_pool.num_blocks > 32)
		return -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
		      (0x1 << i))) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
				(0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= d40c->base->lcla_pool.num_blocks)
		goto err;

	for (; i < d40c->base->lcla_pool.num_blocks; i++) {
		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
		      (0x1 << i))) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
				(0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
	return 0;
err:
	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
	return -EINVAL;
}


static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

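/*
 * Release all active and queued descriptors on the channel and give back
 * its LCLA entries.
 */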
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	unsigned long flags;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.dst_id));
	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.src_id));

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	/* Notice that disable requires the physical channel to be stopped */
	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
		     d40c->phy_chan->num * D40_DREG_PCDELTA +
		     D40_CHAN_REG_SDLNK);
	return val;
}

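/*
 * Write the channel mode (logical/physical), the operational mode option
 * and, for logical channels, the default src/dst configuration and LIDX
 * registers for this channel.
 */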
static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDELT);

		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSELT);

	}
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;
		int s;

		src += d40d->lli_count;
		dst += d40d->lli_count;
		s = d40_log_lli_write(d40c->lcpa,
				      d40c->lcla.src, d40c->lcla.dst,
				      dst, src,
				      d40c->base->plat_data->llis_per_log);

		/* If s is zero, the job is not linked */
		if (s > 0) {
			(void) dma_map_single(d40c->base->dev, d40c->lcla.src,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
			(void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
		}
	}
	d40d->lli_count += d40d->lli_tx_len;
}

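/* Return the number of bytes left to transfer on the currently running job. */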
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (d40c->log_num != D40_PHY_CHAN) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);

		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
{
	/* TODO: Write */
}

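/*
 * Try to link the new physical job to the previously queued (or active) job
 * in hardware, either by redirecting the channel link registers or by
 * patching the last LLI of the previous job to point at the new one.
 */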
static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
{
	struct d40_desc *d40d_prev = NULL;
	int i;
	u32 val;

	if (!list_empty(&d40c->queue))
		d40d_prev = d40_last_queued(d40c);
	else if (!list_empty(&d40c->active))
		d40d_prev = d40_first_active_get(d40c);

	if (!d40d_prev)
		return;

	/* Here we try to join this job with previous jobs */
	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	/* Figure out which link we're currently transmitting */
	for (i = 0; i < d40d_prev->lli_len; i++)
		if (val == d40d_prev->lli_phy.src[i].reg_lnk)
			break;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;

	if (i == (d40d_prev->lli_len - 1) && val > 0) {
		/* Change the current one */
		writel(virt_to_phys(d40d->lli_phy.src),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
		writel(virt_to_phys(d40d->lli_phy.dst),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);

		d40d->is_hw_linked = true;

	} else if (i < d40d_prev->lli_len) {
		(void) dma_unmap_single(d40c->base->dev,
					virt_to_phys(d40d_prev->lli_phy.src),
					d40d_prev->lli_pool.size,
					DMA_TO_DEVICE);

		/* Keep the settings */
		val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.src);

		val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.dst);

		(void) dma_map_single(d40c->base->dev,
				      d40d_prev->lli_phy.src,
				      d40d_prev->lli_pool.size,
				      DMA_TO_DEVICE);
		d40d->is_hw_linked = true;
	}
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	(void) d40_pause(&d40c->chan);

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	if (d40c->log_num == D40_PHY_CHAN)
		d40_tx_submit_phy(d40c, d40d);
	else
		d40_tx_submit_log(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	(void) d40_resume(&d40c->chan);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (d40c->log_num != D40_PHY_CHAN) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/*
		 * If this job is already linked in hw,
		 * do not submit it.
		 */
		if (!d40d->is_hw_linked) {
			/* Initiate DMA job */
			d40_desc_load(d40c, d40d);

			/* Start dma job */
			err = d40_start(d40c);

			if (err)
				return NULL;
		}
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_count < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);

}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		goto err;

	d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (async_tx_test_ack(&d40d->txd)) {
		d40_pool_lli_free(d40d);
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	} else {
		if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

 err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
		{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
		{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev,
				"[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (!conf->dir) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid TX channel address (%d)\n",
			__func__, conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid RX channel address (%d)\n",
			__func__, conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}

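/*
 * Mark an event line (or, for physical channels, the whole src/dst pair) as
 * allocated on the given physical resource. Returns true on success.
 */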
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type &
		       STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;


	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical
		 * channels rather than packing every logical channel onto
		 * the first available phy channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;

}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}


static int d40_free_dma(struct d40_chan *d40c)
{

	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;


	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
		return res;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Release logical channel, deactivate the event line */

		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

Jonas Aaberga5ebca42010-05-18 00:41:09 +02001498static bool d40_is_paused(struct d40_chan *d40c)
1499{
1500 bool is_paused = false;
1501 unsigned long flags;
1502 void __iomem *active_reg;
1503 u32 status;
1504 u32 event;
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001505
1506 spin_lock_irqsave(&d40c->lock, flags);
1507
1508 if (d40c->log_num == D40_PHY_CHAN) {
1509 if (d40c->phy_chan->num % 2 == 0)
1510 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1511 else
1512 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1513
1514 status = (readl(active_reg) &
1515 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1516 D40_CHAN_POS(d40c->phy_chan->num);
1517 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1518 is_paused = true;
1519
1520 goto _exit;
1521 }
1522
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001523 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00001524 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001525 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00001526 status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1527 d40c->phy_chan->num * D40_DREG_PCDELTA +
1528 D40_CHAN_REG_SDLNK);
1529 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001530 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00001531 status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1532 d40c->phy_chan->num * D40_DREG_PCDELTA +
1533 D40_CHAN_REG_SSLNK);
1534 } else {
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001535 dev_err(&d40c->chan.dev->device,
1536 "[%s] Unknown direction\n", __func__);
1537 goto _exit;
1538 }
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00001539
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001540 status = (status & D40_EVENTLINE_MASK(event)) >>
1541 D40_EVENTLINE_POS(event);
1542
1543 if (status != D40_DMA_RUN)
1544 is_paused = true;
Jonas Aaberga5ebca42010-05-18 00:41:09 +02001545_exit:
1546 spin_unlock_irqrestore(&d40c->lock, flags);
1547 return is_paused;
1548
1549}
1550
1551
Linus Walleij8d318a52010-03-30 15:33:42 +02001552static u32 stedma40_residue(struct dma_chan *chan)
1553{
1554 struct d40_chan *d40c =
1555 container_of(chan, struct d40_chan, chan);
1556 u32 bytes_left;
1557 unsigned long flags;
1558
1559 spin_lock_irqsave(&d40c->lock, flags);
1560 bytes_left = d40_residue(d40c);
1561 spin_unlock_irqrestore(&d40c->lock, flags);
1562
1563 return bytes_left;
1564}
1565
1566/* Public DMA functions in addition to the DMA engine framework */
1567
1568int stedma40_set_psize(struct dma_chan *chan,
1569 int src_psize,
1570 int dst_psize)
1571{
1572 struct d40_chan *d40c =
1573 container_of(chan, struct d40_chan, chan);
1574 unsigned long flags;
1575
1576 spin_lock_irqsave(&d40c->lock, flags);
1577
1578 if (d40c->log_num != D40_PHY_CHAN) {
1579 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1580 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
Linus Walleij508849a2010-06-20 21:26:07 +00001581 d40c->log_def.lcsp1 |= src_psize <<
1582 D40_MEM_LCSP1_SCFG_PSIZE_POS;
1583 d40c->log_def.lcsp3 |= dst_psize <<
1584 D40_MEM_LCSP1_SCFG_PSIZE_POS;
Linus Walleij8d318a52010-03-30 15:33:42 +02001585 goto out;
1586 }
1587
1588 if (src_psize == STEDMA40_PSIZE_PHY_1)
1589 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1590 else {
1591 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1592 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1593 D40_SREG_CFG_PSIZE_POS);
1594 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1595 }
1596
1597 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1598 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1599 else {
1600 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1601 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1602 D40_SREG_CFG_PSIZE_POS);
1603 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1604 }
1605out:
1606 spin_unlock_irqrestore(&d40c->lock, flags);
1607 return 0;
1608}
1609EXPORT_SYMBOL(stedma40_set_psize);
1610
1611struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1612 struct scatterlist *sgl_dst,
1613 struct scatterlist *sgl_src,
1614 unsigned int sgl_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001615 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001616{
1617 int res;
1618 struct d40_desc *d40d;
1619 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1620 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001621 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001622
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001623 if (d40c->phy_chan == NULL) {
1624 dev_err(&d40c->chan.dev->device,
1625 "[%s] Unallocated channel.\n", __func__);
1626 return ERR_PTR(-EINVAL);
1627 }
1628
Jonas Aaberg2a614342010-06-20 21:25:24 +00001629 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001630 d40d = d40_desc_get(d40c);
1631
1632 if (d40d == NULL)
1633 goto err;
1634
Linus Walleij8d318a52010-03-30 15:33:42 +02001635 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001636 d40d->lli_tx_len = d40d->lli_len;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001637 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001638
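	/*
	 * Logical channels are set up with logical LLIs (run out of
	 * LCPA/LCLA), physical channels with physical LLIs that are
	 * DMA-mapped so the controller can fetch them from memory.
	 */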
1639 if (d40c->log_num != D40_PHY_CHAN) {
Per Friden941b77a2010-06-20 21:24:45 +00001640 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1641 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1642
Linus Walleij8d318a52010-03-30 15:33:42 +02001643 if (sgl_len > 1)
1644 /*
1645 * Check if there is space available in lcla. If not,
1646 * split list into 1-length and run only in lcpa
1647 * space.
1648 */
Linus Walleij508849a2010-06-20 21:26:07 +00001649 if (d40_lcla_id_get(d40c) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001650 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001651
1652 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1653 dev_err(&d40c->chan.dev->device,
1654 "[%s] Out of memory\n", __func__);
1655 goto err;
1656 }
1657
1658 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1659 sgl_src,
1660 sgl_len,
1661 d40d->lli_log.src,
1662 d40c->log_def.lcsp1,
1663 d40c->dma_cfg.src_info.data_width,
Per Friden941b77a2010-06-20 21:24:45 +00001664 d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001665 d40c->base->plat_data->llis_per_log);
1666
1667 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1668 sgl_dst,
1669 sgl_len,
1670 d40d->lli_log.dst,
1671 d40c->log_def.lcsp3,
1672 d40c->dma_cfg.dst_info.data_width,
Per Friden941b77a2010-06-20 21:24:45 +00001673 d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001674 d40c->base->plat_data->llis_per_log);
1675
1676
1677 } else {
1678 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1679 dev_err(&d40c->chan.dev->device,
1680 "[%s] Out of memory\n", __func__);
1681 goto err;
1682 }
1683
1684 res = d40_phy_sg_to_lli(sgl_src,
1685 sgl_len,
1686 0,
1687 d40d->lli_phy.src,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001688 virt_to_phys(d40d->lli_phy.src),
Linus Walleij8d318a52010-03-30 15:33:42 +02001689 d40c->src_def_cfg,
1690 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00001691 d40c->dma_cfg.src_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02001692
1693 if (res < 0)
1694 goto err;
1695
1696 res = d40_phy_sg_to_lli(sgl_dst,
1697 sgl_len,
1698 0,
1699 d40d->lli_phy.dst,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001700 virt_to_phys(d40d->lli_phy.dst),
Linus Walleij8d318a52010-03-30 15:33:42 +02001701 d40c->dst_def_cfg,
1702 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00001703 d40c->dma_cfg.dst_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02001704
1705 if (res < 0)
1706 goto err;
1707
1708 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1709 d40d->lli_pool.size, DMA_TO_DEVICE);
1710 }
1711
1712 dma_async_tx_descriptor_init(&d40d->txd, chan);
1713
1714 d40d->txd.tx_submit = d40_tx_submit;
1715
Jonas Aaberg2a614342010-06-20 21:25:24 +00001716 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001717
1718 return &d40d->txd;
1719err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001720 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001721 return NULL;
1722}
1723EXPORT_SYMBOL(stedma40_memcpy_sg);
1724
1725bool stedma40_filter(struct dma_chan *chan, void *data)
1726{
1727 struct stedma40_chan_cfg *info = data;
1728 struct d40_chan *d40c =
1729 container_of(chan, struct d40_chan, chan);
1730 int err;
1731
1732 if (data) {
1733 err = d40_validate_conf(d40c, info);
1734 if (!err)
1735 d40c->dma_cfg = *info;
1736 } else
1737 err = d40_config_memcpy(d40c);
1738
1739 return err == 0;
1740}
1741EXPORT_SYMBOL(stedma40_filter);
1742
1743/* DMA ENGINE functions */
1744static int d40_alloc_chan_resources(struct dma_chan *chan)
1745{
1746 int err;
1747 unsigned long flags;
1748 struct d40_chan *d40c =
1749 container_of(chan, struct d40_chan, chan);
Linus Walleijef1872e2010-06-20 21:24:52 +00001750 bool is_free_phy;
Linus Walleij8d318a52010-03-30 15:33:42 +02001751 spin_lock_irqsave(&d40c->lock, flags);
1752
1753 d40c->completed = chan->cookie = 1;
1754
1755 /*
1756 * If no dma configuration is set (channel_type == 0)
Linus Walleijef1872e2010-06-20 21:24:52 +00001757 * use default configuration (memcpy)
Linus Walleij8d318a52010-03-30 15:33:42 +02001758 */
1759 if (d40c->dma_cfg.channel_type == 0) {
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001760
Linus Walleij8d318a52010-03-30 15:33:42 +02001761 err = d40_config_memcpy(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001762 if (err) {
1763 dev_err(&d40c->chan.dev->device,
1764 "[%s] Failed to configure memcpy channel\n",
1765 __func__);
1766 goto fail;
1767 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001768 }
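	/* Remember whether the physical resource was free before allocation */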
Linus Walleijef1872e2010-06-20 21:24:52 +00001769 is_free_phy = (d40c->phy_chan == NULL);
Linus Walleij8d318a52010-03-30 15:33:42 +02001770
1771 err = d40_allocate_channel(d40c);
1772 if (err) {
1773 dev_err(&d40c->chan.dev->device,
1774 "[%s] Failed to allocate channel\n", __func__);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001775 goto fail;
Linus Walleij8d318a52010-03-30 15:33:42 +02001776 }
1777
Linus Walleijef1872e2010-06-20 21:24:52 +00001778 /* Fill in basic CFG register values */
1779 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1780 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1781
1782 if (d40c->log_num != D40_PHY_CHAN) {
1783 d40_log_cfg(&d40c->dma_cfg,
1784 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1785
1786 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1787 d40c->lcpa = d40c->base->lcpa_base +
1788 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1789 else
1790 d40c->lcpa = d40c->base->lcpa_base +
1791 d40c->dma_cfg.dst_dev_type *
1792 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1793 }
1794
1795 /*
1796 * Only write channel configuration to the DMA if the physical
1797 * resource is free. In case of multiple logical channels
1798 * on the same physical resource, only the first write is necessary.
1799 */
Jonas Aabergb55912c2010-08-09 12:08:02 +00001800 if (is_free_phy)
1801 d40_config_write(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001802fail:
Linus Walleij8d318a52010-03-30 15:33:42 +02001803 spin_unlock_irqrestore(&d40c->lock, flags);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001804 return err;
Linus Walleij8d318a52010-03-30 15:33:42 +02001805}
1806
1807static void d40_free_chan_resources(struct dma_chan *chan)
1808{
1809 struct d40_chan *d40c =
1810 container_of(chan, struct d40_chan, chan);
1811 int err;
1812 unsigned long flags;
1813
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001814 if (d40c->phy_chan == NULL) {
1815 dev_err(&d40c->chan.dev->device,
1816 "[%s] Cannot free unallocated channel\n", __func__);
1817 return;
1818 }
1819
1820
Linus Walleij8d318a52010-03-30 15:33:42 +02001821 spin_lock_irqsave(&d40c->lock, flags);
1822
1823 err = d40_free_dma(d40c);
1824
1825 if (err)
1826 dev_err(&d40c->chan.dev->device,
1827 "[%s] Failed to free channel\n", __func__);
1828 spin_unlock_irqrestore(&d40c->lock, flags);
1829}
1830
1831static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1832 dma_addr_t dst,
1833 dma_addr_t src,
1834 size_t size,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001835 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001836{
1837 struct d40_desc *d40d;
1838 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1839 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001840 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001841 int err = 0;
1842
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001843 if (d40c->phy_chan == NULL) {
1844 dev_err(&d40c->chan.dev->device,
1845 "[%s] Channel is not allocated.\n", __func__);
1846 return ERR_PTR(-EINVAL);
1847 }
1848
Jonas Aaberg2a614342010-06-20 21:25:24 +00001849 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001850 d40d = d40_desc_get(d40c);
1851
1852 if (d40d == NULL) {
1853 dev_err(&d40c->chan.dev->device,
1854 "[%s] Descriptor is NULL\n", __func__);
1855 goto err;
1856 }
1857
Jonas Aaberg2a614342010-06-20 21:25:24 +00001858 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001859
1860 dma_async_tx_descriptor_init(&d40d->txd, chan);
1861
1862 d40d->txd.tx_submit = d40_tx_submit;
1863
1864 if (d40c->log_num != D40_PHY_CHAN) {
1865
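		/* A plain memcpy job needs a single LLI pair: one src, one dst */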
1866 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1867 dev_err(&d40c->chan.dev->device,
1868 "[%s] Out of memory\n", __func__);
1869 goto err;
1870 }
1871 d40d->lli_len = 1;
Per Friden941b77a2010-06-20 21:24:45 +00001872 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001873
1874 d40_log_fill_lli(d40d->lli_log.src,
1875 src,
1876 size,
1877 0,
1878 d40c->log_def.lcsp1,
1879 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg2123a612010-06-20 21:25:54 +00001880 false, true);
Linus Walleij8d318a52010-03-30 15:33:42 +02001881
1882 d40_log_fill_lli(d40d->lli_log.dst,
1883 dst,
1884 size,
1885 0,
1886 d40c->log_def.lcsp3,
1887 d40c->dma_cfg.dst_info.data_width,
1888 true, true);
1889
1890 } else {
1891
1892 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1893 dev_err(&d40c->chan.dev->device,
1894 "[%s] Out of memory\n", __func__);
1895 goto err;
1896 }
1897
1898 err = d40_phy_fill_lli(d40d->lli_phy.src,
1899 src,
1900 size,
1901 d40c->dma_cfg.src_info.psize,
1902 0,
1903 d40c->src_def_cfg,
1904 true,
1905 d40c->dma_cfg.src_info.data_width,
1906 false);
1907 if (err)
1908 goto err_fill_lli;
1909
1910 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1911 dst,
1912 size,
1913 d40c->dma_cfg.dst_info.psize,
1914 0,
1915 d40c->dst_def_cfg,
1916 true,
1917 d40c->dma_cfg.dst_info.data_width,
1918 false);
1919
1920 if (err)
1921 goto err_fill_lli;
1922
1923 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1924 d40d->lli_pool.size, DMA_TO_DEVICE);
1925 }
1926
Jonas Aaberg2a614342010-06-20 21:25:24 +00001927 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001928 return &d40d->txd;
1929
1930err_fill_lli:
1931 dev_err(&d40c->chan.dev->device,
1932 "[%s] Failed filling in PHY LLI\n", __func__);
1933 d40_pool_lli_free(d40d);
1934err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001935 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001936 return NULL;
1937}
1938
1939static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1940 struct d40_chan *d40c,
1941 struct scatterlist *sgl,
1942 unsigned int sg_len,
1943 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001944 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001945{
1946 dma_addr_t dev_addr = 0;
1947 int total_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001948
1949 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1950 dev_err(&d40c->chan.dev->device,
1951 "[%s] Out of memory\n", __func__);
1952 return -ENOMEM;
1953 }
1954
1955 d40d->lli_len = sg_len;
Per Friden941b77a2010-06-20 21:24:45 +00001956 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1957 d40d->lli_tx_len = d40d->lli_len;
1958 else
1959 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
Linus Walleij8d318a52010-03-30 15:33:42 +02001960
1961 if (sg_len > 1)
1962 /*
1963 * Check if there is space available in lcla.
1964 * If not, split list into 1-length and run only
1965 * in lcpa space.
1966 */
Linus Walleij508849a2010-06-20 21:26:07 +00001967 if (d40_lcla_id_get(d40c) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001968 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001969
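	/*
	 * An address set at runtime via DMA_SLAVE_CONFIG takes precedence
	 * over the fixed device address from platform data.
	 */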
Jonas Aaberg2a614342010-06-20 21:25:24 +00001970 if (direction == DMA_FROM_DEVICE)
Linus Walleij95e14002010-08-04 13:37:45 +02001971 if (d40c->runtime_addr)
1972 dev_addr = d40c->runtime_addr;
1973 else
1974 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001975 else if (direction == DMA_TO_DEVICE)
Linus Walleij95e14002010-08-04 13:37:45 +02001976 if (d40c->runtime_addr)
1977 dev_addr = d40c->runtime_addr;
1978 else
1979 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1980
Jonas Aaberg2a614342010-06-20 21:25:24 +00001981 else
Linus Walleij8d318a52010-03-30 15:33:42 +02001982 return -EINVAL;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001983
1984 total_size = d40_log_sg_to_dev(&d40c->lcla,
1985 sgl, sg_len,
1986 &d40d->lli_log,
1987 &d40c->log_def,
1988 d40c->dma_cfg.src_info.data_width,
1989 d40c->dma_cfg.dst_info.data_width,
1990 direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001991 dev_addr, d40d->lli_tx_len,
1992 d40c->base->plat_data->llis_per_log);
1993
Linus Walleij8d318a52010-03-30 15:33:42 +02001994 if (total_size < 0)
1995 return -EINVAL;
1996
1997 return 0;
1998}
1999
2000static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2001 struct d40_chan *d40c,
2002 struct scatterlist *sgl,
2003 unsigned int sgl_len,
2004 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002005 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02002006{
2007 dma_addr_t src_dev_addr;
2008 dma_addr_t dst_dev_addr;
2009 int res;
2010
2011 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
2012 dev_err(&d40c->chan.dev->device,
2013 "[%s] Out of memory\n", __func__);
2014 return -ENOMEM;
2015 }
2016
2017 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00002018 d40d->lli_tx_len = sgl_len;
Linus Walleij8d318a52010-03-30 15:33:42 +02002019
2020 if (direction == DMA_FROM_DEVICE) {
2021 dst_dev_addr = 0;
Linus Walleij95e14002010-08-04 13:37:45 +02002022 if (d40c->runtime_addr)
2023 src_dev_addr = d40c->runtime_addr;
2024 else
2025 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Linus Walleij8d318a52010-03-30 15:33:42 +02002026 } else if (direction == DMA_TO_DEVICE) {
Linus Walleij95e14002010-08-04 13:37:45 +02002027 if (d40c->runtime_addr)
2028 dst_dev_addr = d40c->runtime_addr;
2029 else
2030 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
Linus Walleij8d318a52010-03-30 15:33:42 +02002031 src_dev_addr = 0;
2032 } else
2033 return -EINVAL;
2034
2035 res = d40_phy_sg_to_lli(sgl,
2036 sgl_len,
2037 src_dev_addr,
2038 d40d->lli_phy.src,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00002039 virt_to_phys(d40d->lli_phy.src),
Linus Walleij8d318a52010-03-30 15:33:42 +02002040 d40c->src_def_cfg,
2041 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00002042 d40c->dma_cfg.src_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02002043 if (res < 0)
2044 return res;
2045
2046 res = d40_phy_sg_to_lli(sgl,
2047 sgl_len,
2048 dst_dev_addr,
2049 d40d->lli_phy.dst,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00002050 virt_to_phys(d40d->lli_phy.dst),
Linus Walleij8d318a52010-03-30 15:33:42 +02002051 d40c->dst_def_cfg,
2052 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00002053 d40c->dma_cfg.dst_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02002054 if (res < 0)
2055 return res;
2056
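	/* Make the physical LLI list visible to the DMAC */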
2057 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2058 d40d->lli_pool.size, DMA_TO_DEVICE);
2059 return 0;
2060}
2061
2062static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2063 struct scatterlist *sgl,
2064 unsigned int sg_len,
2065 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002066 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02002067{
2068 struct d40_desc *d40d;
2069 struct d40_chan *d40c = container_of(chan, struct d40_chan,
2070 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00002071 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02002072 int err;
2073
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002074 if (d40c->phy_chan == NULL) {
2075 dev_err(&d40c->chan.dev->device,
2076 "[%s] Cannot prepare unallocated channel\n", __func__);
2077 return ERR_PTR(-EINVAL);
2078 }
2079
Linus Walleij8d318a52010-03-30 15:33:42 +02002080 if (d40c->dma_cfg.pre_transfer)
2081 d40c->dma_cfg.pre_transfer(chan,
2082 d40c->dma_cfg.pre_transfer_data,
2083 sg_dma_len(sgl));
2084
Jonas Aaberg2a614342010-06-20 21:25:24 +00002085 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002086 d40d = d40_desc_get(d40c);
Jonas Aaberg2a614342010-06-20 21:25:24 +00002087 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002088
2089 if (d40d == NULL)
2090 return NULL;
2091
Linus Walleij8d318a52010-03-30 15:33:42 +02002092 if (d40c->log_num != D40_PHY_CHAN)
2093 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002094 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002095 else
2096 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002097 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002098 if (err) {
2099 dev_err(&d40c->chan.dev->device,
2100 "[%s] Failed to prepare %s slave sg job: %d\n",
2101 __func__,
2102 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2103 return NULL;
2104 }
2105
Jonas Aaberg2a614342010-06-20 21:25:24 +00002106 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02002107
2108 dma_async_tx_descriptor_init(&d40d->txd, chan);
2109
2110 d40d->txd.tx_submit = d40_tx_submit;
2111
2112 return &d40d->txd;
2113}
2114
2115static enum dma_status d40_tx_status(struct dma_chan *chan,
2116 dma_cookie_t cookie,
2117 struct dma_tx_state *txstate)
2118{
2119 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2120 dma_cookie_t last_used;
2121 dma_cookie_t last_complete;
2122 int ret;
2123
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002124 if (d40c->phy_chan == NULL) {
2125 dev_err(&d40c->chan.dev->device,
2126 "[%s] Cannot read status of unallocated channel\n",
2127 __func__);
2128 return -EINVAL;
2129 }
2130
Linus Walleij8d318a52010-03-30 15:33:42 +02002131 last_complete = d40c->completed;
2132 last_used = chan->cookie;
2133
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002134 if (d40_is_paused(d40c))
2135 ret = DMA_PAUSED;
2136 else
2137 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij8d318a52010-03-30 15:33:42 +02002138
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002139 dma_set_tx_state(txstate, last_complete, last_used,
2140 stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002141
2142 return ret;
2143}
2144
2145static void d40_issue_pending(struct dma_chan *chan)
2146{
2147 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2148 unsigned long flags;
2149
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002150 if (d40c->phy_chan == NULL) {
2151 dev_err(&d40c->chan.dev->device,
2152 "[%s] Channel is not allocated!\n", __func__);
2153 return;
2154 }
2155
Linus Walleij8d318a52010-03-30 15:33:42 +02002156 spin_lock_irqsave(&d40c->lock, flags);
2157
2158 /* Busy means that pending jobs are already being processed */
2159 if (!d40c->busy)
2160 (void) d40_queue_start(d40c);
2161
2162 spin_unlock_irqrestore(&d40c->lock, flags);
2163}
2164
Linus Walleij95e14002010-08-04 13:37:45 +02002165/* Runtime reconfiguration extension */
2166static void d40_set_runtime_config(struct dma_chan *chan,
2167 struct dma_slave_config *config)
2168{
2169 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2170 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2171 enum dma_slave_buswidth config_addr_width;
2172 dma_addr_t config_addr;
2173 u32 config_maxburst;
2174 enum stedma40_periph_data_width addr_width;
2175 int psize;
2176
2177 if (config->direction == DMA_FROM_DEVICE) {
2178 dma_addr_t dev_addr_rx =
2179 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2180
2181 config_addr = config->src_addr;
2182 if (dev_addr_rx)
2183 dev_dbg(d40c->base->dev,
2184 "channel has a pre-wired RX address %08x "
2185 "overriding with %08x\n",
2186 dev_addr_rx, config_addr);
2187 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2188 dev_dbg(d40c->base->dev,
2189 "channel was not configured for peripheral "
2190 "to memory transfer (%d) overriding\n",
2191 cfg->dir);
2192 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2193
2194 config_addr_width = config->src_addr_width;
2195 config_maxburst = config->src_maxburst;
2196
2197 } else if (config->direction == DMA_TO_DEVICE) {
2198 dma_addr_t dev_addr_tx =
2199 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2200
2201 config_addr = config->dst_addr;
2202 if (dev_addr_tx)
2203 dev_dbg(d40c->base->dev,
2204 "channel has a pre-wired TX address %08x "
2205 "overriding with %08x\n",
2206 dev_addr_tx, config_addr);
2207 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2208 dev_dbg(d40c->base->dev,
2209 "channel was not configured for memory "
2210 "to peripheral transfer (%d) overriding\n",
2211 cfg->dir);
2212 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2213
2214 config_addr_width = config->dst_addr_width;
2215 config_maxburst = config->dst_maxburst;
2216
2217 } else {
2218 dev_err(d40c->base->dev,
2219 "unrecognized channel direction %d\n",
2220 config->direction);
2221 return;
2222 }
2223
2224 switch (config_addr_width) {
2225 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2226 addr_width = STEDMA40_BYTE_WIDTH;
2227 break;
2228 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2229 addr_width = STEDMA40_HALFWORD_WIDTH;
2230 break;
2231 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2232 addr_width = STEDMA40_WORD_WIDTH;
2233 break;
2234 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2235 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2236 break;
2237 default:
2238 dev_err(d40c->base->dev,
2239 "illegal peripheral address width "
2240 "requested (%d)\n",
2241 config_addr_width);
2242 return;
2243 }
2244
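	/*
	 * Pick the largest supported physical size that does not exceed
	 * the requested maxburst.
	 */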
2245 if (config_maxburst >= 16)
2246 psize = STEDMA40_PSIZE_LOG_16;
2247 else if (config_maxburst >= 8)
2248 psize = STEDMA40_PSIZE_LOG_8;
2249 else if (config_maxburst >= 4)
2250 psize = STEDMA40_PSIZE_LOG_4;
2251 else
2252 psize = STEDMA40_PSIZE_LOG_1;
2253
2254 /* Set up all the endpoint configs */
2255 cfg->src_info.data_width = addr_width;
2256 cfg->src_info.psize = psize;
2257 cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
2258 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2259 cfg->dst_info.data_width = addr_width;
2260 cfg->dst_info.psize = psize;
2261 cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
2262 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2263
2264 /* These settings will take precedence later */
2265 d40c->runtime_addr = config_addr;
2266 d40c->runtime_direction = config->direction;
2267 dev_dbg(d40c->base->dev,
2268 "configured channel %s for %s, data width %d, "
2269 "maxburst %d bytes, LE, no flow control\n",
2270 dma_chan_name(chan),
2271 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2272 config_addr_width,
2273 config_maxburst);
2274}
2275
Linus Walleij05827632010-05-17 16:30:42 -07002276static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2277 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002278{
2279 unsigned long flags;
2280 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2281
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002282 if (d40c->phy_chan == NULL) {
2283 dev_err(&d40c->chan.dev->device,
2284 "[%s] Channel is not allocated!\n", __func__);
2285 return -EINVAL;
2286 }
2287
Linus Walleij8d318a52010-03-30 15:33:42 +02002288 switch (cmd) {
2289 case DMA_TERMINATE_ALL:
2290 spin_lock_irqsave(&d40c->lock, flags);
2291 d40_term_all(d40c);
2292 spin_unlock_irqrestore(&d40c->lock, flags);
2293 return 0;
2294 case DMA_PAUSE:
2295 return d40_pause(chan);
2296 case DMA_RESUME:
2297 return d40_resume(chan);
Linus Walleij95e14002010-08-04 13:37:45 +02002298 case DMA_SLAVE_CONFIG:
2299 d40_set_runtime_config(chan,
2300 (struct dma_slave_config *) arg);
2301 return 0;
2302 default:
2303 break;
Linus Walleij8d318a52010-03-30 15:33:42 +02002304 }
2305
2306 /* Other commands are unimplemented */
2307 return -ENXIO;
2308}
2309
2310/* Initialization functions */
2311
2312static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2313 struct d40_chan *chans, int offset,
2314 int num_chans)
2315{
2316 int i = 0;
2317 struct d40_chan *d40c;
2318
2319 INIT_LIST_HEAD(&dma->channels);
2320
2321 for (i = offset; i < offset + num_chans; i++) {
2322 d40c = &chans[i];
2323 d40c->base = base;
2324 d40c->chan.device = dma;
2325
2326 /* Invalidate lcla element */
2327 d40c->lcla.src_id = -1;
2328 d40c->lcla.dst_id = -1;
2329
2330 spin_lock_init(&d40c->lock);
2331
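		/* Default to physical mode; changed when a logical channel is allocated */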
2332 d40c->log_num = D40_PHY_CHAN;
2333
Linus Walleij8d318a52010-03-30 15:33:42 +02002334 INIT_LIST_HEAD(&d40c->active);
2335 INIT_LIST_HEAD(&d40c->queue);
2336 INIT_LIST_HEAD(&d40c->client);
2337
Linus Walleij8d318a52010-03-30 15:33:42 +02002338 tasklet_init(&d40c->tasklet, dma_tasklet,
2339 (unsigned long) d40c);
2340
2341 list_add_tail(&d40c->chan.device_node,
2342 &dma->channels);
2343 }
2344}
2345
2346static int __init d40_dmaengine_init(struct d40_base *base,
2347 int num_reserved_chans)
2348{
2349 int err;
2350
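	/*
	 * Three dma_device instances are registered: slave-capable logical
	 * channels, memcpy-only logical channels and physical channels
	 * capable of both.
	 */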
2351 d40_chan_init(base, &base->dma_slave, base->log_chans,
2352 0, base->num_log_chans);
2353
2354 dma_cap_zero(base->dma_slave.cap_mask);
2355 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2356
2357 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2358 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2359 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2360 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2361 base->dma_slave.device_tx_status = d40_tx_status;
2362 base->dma_slave.device_issue_pending = d40_issue_pending;
2363 base->dma_slave.device_control = d40_control;
2364 base->dma_slave.dev = base->dev;
2365
2366 err = dma_async_device_register(&base->dma_slave);
2367
2368 if (err) {
2369 dev_err(base->dev,
2370 "[%s] Failed to register slave channels\n",
2371 __func__);
2372 goto failure1;
2373 }
2374
2375 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2376 base->num_log_chans, base->plat_data->memcpy_len);
2377
2378 dma_cap_zero(base->dma_memcpy.cap_mask);
2379 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2380
2381 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2382 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2383 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2384 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2385 base->dma_memcpy.device_tx_status = d40_tx_status;
2386 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2387 base->dma_memcpy.device_control = d40_control;
2388 base->dma_memcpy.dev = base->dev;
2389 /*
2390 * This controller can only access addresses at even
2391 * 32-bit boundaries, i.e. 2^2
2392 */
2393 base->dma_memcpy.copy_align = 2;
2394
2395 err = dma_async_device_register(&base->dma_memcpy);
2396
2397 if (err) {
2398 dev_err(base->dev,
2399 "[%s] Failed to regsiter memcpy only channels\n",
2400 __func__);
2401 goto failure2;
2402 }
2403
2404 d40_chan_init(base, &base->dma_both, base->phy_chans,
2405 0, num_reserved_chans);
2406
2407 dma_cap_zero(base->dma_both.cap_mask);
2408 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2409 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2410
2411 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2412 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2413 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2414 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2415 base->dma_both.device_tx_status = d40_tx_status;
2416 base->dma_both.device_issue_pending = d40_issue_pending;
2417 base->dma_both.device_control = d40_control;
2418 base->dma_both.dev = base->dev;
2419 base->dma_both.copy_align = 2;
2420 err = dma_async_device_register(&base->dma_both);
2421
2422 if (err) {
2423 dev_err(base->dev,
2424 "[%s] Failed to register logical and physical capable channels\n",
2425 __func__);
2426 goto failure3;
2427 }
2428 return 0;
2429failure3:
2430 dma_async_device_unregister(&base->dma_memcpy);
2431failure2:
2432 dma_async_device_unregister(&base->dma_slave);
2433failure1:
2434 return err;
2435}
2436
2437/* Initialization functions. */
2438
2439static int __init d40_phy_res_init(struct d40_base *base)
2440{
2441 int i;
2442 int num_phy_chans_avail = 0;
2443 u32 val[2];
2444 int odd_even_bit = -2;
2445
2446 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2447 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2448
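	/*
	 * PRSME/PRSMO hold two mode bits per physical channel (even and odd
	 * numbered channels respectively); the value 1 marks a channel as
	 * reserved for secure use only.
	 */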
2449 for (i = 0; i < base->num_phy_chans; i++) {
2450 base->phy_res[i].num = i;
2451 odd_even_bit += 2 * ((i % 2) == 0);
2452 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2453 /* Mark security only channels as occupied */
2454 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2455 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2456 } else {
2457 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2458 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2459 num_phy_chans_avail++;
2460 }
2461 spin_lock_init(&base->phy_res[i].lock);
2462 }
Jonas Aaberg6b7acd82010-06-20 21:26:59 +00002463
2464 /* Mark disabled channels as occupied */
2465 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2466 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2467 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2468 num_phy_chans_avail--;
2469 }
2470
Linus Walleij8d318a52010-03-30 15:33:42 +02002471 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2472 num_phy_chans_avail, base->num_phy_chans);
2473
2474 /* Verify settings extended vs standard */
2475 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2476
2477 for (i = 0; i < base->num_phy_chans; i++) {
2478
2479 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2480 (val[0] & 0x3) != 1)
2481 dev_info(base->dev,
2482 "[%s] INFO: channel %d is misconfigured (%d)\n",
2483 __func__, i, val[0] & 0x3);
2484
2485 val[0] = val[0] >> 2;
2486 }
2487
2488 return num_phy_chans_avail;
2489}
2490
2491static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2492{
2493 static const struct d40_reg_val dma_id_regs[] = {
2494 /* Peripheral Id */
2495 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2496 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2497 /*
2498 * D40_DREG_PERIPHID2 Depends on HW revision:
2499 * MOP500/HREF ED has 0x0008,
2500 * ? has 0x0018,
2501 * HREF V1 has 0x0028
2502 */
2503 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2504
2505 /* PCell Id */
2506 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2507 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2508 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2509 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2510 };
2511 struct stedma40_platform_data *plat_data;
2512 struct clk *clk = NULL;
2513 void __iomem *virtbase = NULL;
2514 struct resource *res = NULL;
2515 struct d40_base *base = NULL;
2516 int num_log_chans = 0;
2517 int num_phy_chans;
2518 int i;
Linus Walleijf4185592010-06-22 18:06:42 -07002519 u32 val;
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002520 u32 rev;
Linus Walleij8d318a52010-03-30 15:33:42 +02002521
2522 clk = clk_get(&pdev->dev, NULL);
2523
2524 if (IS_ERR(clk)) {
2525 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2526 __func__);
2527 goto failure;
2528 }
2529
2530 clk_enable(clk);
2531
2532 /* Get IO for DMAC base address */
2533 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2534 if (!res)
2535 goto failure;
2536
2537 if (request_mem_region(res->start, resource_size(res),
2538 D40_NAME " I/O base") == NULL)
2539 goto failure;
2540
2541 virtbase = ioremap(res->start, resource_size(res));
2542 if (!virtbase)
2543 goto failure;
2544
2545 /* HW version check */
2546 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2547 if (dma_id_regs[i].val !=
2548 readl(virtbase + dma_id_regs[i].reg)) {
2549 dev_err(&pdev->dev,
2550 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2551 __func__,
2552 dma_id_regs[i].val,
2553 dma_id_regs[i].reg,
2554 readl(virtbase + dma_id_regs[i].reg));
2555 goto failure;
2556 }
2557 }
2558
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002559 /* Get silicon revision and designer */
Linus Walleijf4185592010-06-22 18:06:42 -07002560 val = readl(virtbase + D40_DREG_PERIPHID2);
Linus Walleij8d318a52010-03-30 15:33:42 +02002561
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002562 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
2563 D40_HW_DESIGNER) {
Linus Walleij8d318a52010-03-30 15:33:42 +02002564 dev_err(&pdev->dev,
2565 "[%s] Unknown designer! Got %x wanted %x\n",
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002566 __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2567 D40_HW_DESIGNER);
Linus Walleij8d318a52010-03-30 15:33:42 +02002568 goto failure;
2569 }
2570
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002571 rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
2572 D40_DREG_PERIPHID2_REV_POS;
2573
Linus Walleij8d318a52010-03-30 15:33:42 +02002574 /* The number of physical channels on this HW */
2575 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2576
2577 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002578 rev, res->start);
Linus Walleij8d318a52010-03-30 15:33:42 +02002579
2580 plat_data = pdev->dev.platform_data;
2581
2582 /* Count the number of logical channels in use */
2583 for (i = 0; i < plat_data->dev_len; i++)
2584 if (plat_data->dev_rx[i] != 0)
2585 num_log_chans++;
2586
2587 for (i = 0; i < plat_data->dev_len; i++)
2588 if (plat_data->dev_tx[i] != 0)
2589 num_log_chans++;
2590
2591 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2592 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2593 sizeof(struct d40_chan), GFP_KERNEL);
2594
2595 if (base == NULL) {
2596 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2597 goto failure;
2598 }
2599
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002600 base->rev = rev;
Linus Walleij8d318a52010-03-30 15:33:42 +02002601 base->clk = clk;
2602 base->num_phy_chans = num_phy_chans;
2603 base->num_log_chans = num_log_chans;
2604 base->phy_start = res->start;
2605 base->phy_size = resource_size(res);
2606 base->virtbase = virtbase;
2607 base->plat_data = plat_data;
2608 base->dev = &pdev->dev;
2609 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2610 base->log_chans = &base->phy_chans[num_phy_chans];
2611
2612 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2613 GFP_KERNEL);
2614 if (!base->phy_res)
2615 goto failure;
2616
2617 base->lookup_phy_chans = kzalloc(num_phy_chans *
2618 sizeof(struct d40_chan *),
2619 GFP_KERNEL);
2620 if (!base->lookup_phy_chans)
2621 goto failure;
2622
2623 if (num_log_chans + plat_data->memcpy_len) {
2624 /*
2625 * The max number of logical channels is the number of event
2626 * lines for all src and dst devices
2627 */
2628 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2629 sizeof(struct d40_chan *),
2630 GFP_KERNEL);
2631 if (!base->lookup_log_chans)
2632 goto failure;
2633 }
2634 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2635 GFP_KERNEL);
2636 if (!base->lcla_pool.alloc_map)
2637 goto failure;
2638
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002639 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2640 0, SLAB_HWCACHE_ALIGN,
2641 NULL);
2642 if (base->desc_slab == NULL)
2643 goto failure;
2644
Linus Walleij8d318a52010-03-30 15:33:42 +02002645 return base;
2646
2647failure:
2648 if (clk) {
2649 clk_disable(clk);
2650 clk_put(clk);
2651 }
2652 if (virtbase)
2653 iounmap(virtbase);
2654 if (res)
2655 release_mem_region(res->start,
2656 resource_size(res));
2659
2660 if (base) {
2661 kfree(base->lcla_pool.alloc_map);
2662 kfree(base->lookup_log_chans);
2663 kfree(base->lookup_phy_chans);
2664 kfree(base->phy_res);
2665 kfree(base);
2666 }
2667
2668 return NULL;
2669}
2670
2671static void __init d40_hw_init(struct d40_base *base)
2672{
2673
2674 static const struct d40_reg_val dma_init_reg[] = {
2675 /* Clock every part of the DMA block from start */
2676 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2677
2678 /* Interrupts on all logical channels */
2679 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2680 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2681 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2682 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2683 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2684 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2685 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2686 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2687 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2688 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2689 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2690 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2691 };
2692 int i;
2693 u32 prmseo[2] = {0, 0};
2694 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2695 u32 pcmis = 0;
2696 u32 pcicr = 0;
2697
2698 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2699 writel(dma_init_reg[i].val,
2700 base->virtbase + dma_init_reg[i].reg);
2701
2702 /* Configure all our dma channels to default settings */
2703 for (i = 0; i < base->num_phy_chans; i++) {
2704
2705 activeo[i % 2] = activeo[i % 2] << 2;
2706
2707 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2708 == D40_ALLOC_PHY) {
2709 activeo[i % 2] |= 3;
2710 continue;
2711 }
2712
2713 /* Enable interrupt # */
2714 pcmis = (pcmis << 1) | 1;
2715
2716 /* Clear interrupt # */
2717 pcicr = (pcicr << 1) | 1;
2718
2719 /* Set channel to physical mode */
2720 prmseo[i % 2] = prmseo[i % 2] << 2;
2721 prmseo[i % 2] |= 1;
2722
2723 }
2724
2725 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2726 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2727 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2728 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2729
2730 /* Write which interrupt to enable */
2731 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2732
2733 /* Write which interrupt to clear */
2734 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2735
2736}
2737
Linus Walleij508849a2010-06-20 21:26:07 +00002738static int __init d40_lcla_allocate(struct d40_base *base)
2739{
2740 unsigned long *page_list;
2741 int i, j;
2742 int ret = 0;
2743
2744 /*
2745 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
2746 * To fulfill this hardware requirement without wasting 256 KiB
2747 * we allocate pages until we get an aligned one.
2748 */
2749 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2750 GFP_KERNEL);
2751
2752 if (!page_list) {
2753 ret = -ENOMEM;
2754 goto failure;
2755 }
2756
2757 /* Calculate how many pages are required */
2758 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2759
2760 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2761 page_list[i] = __get_free_pages(GFP_KERNEL,
2762 base->lcla_pool.pages);
2763 if (!page_list[i]) {
2764
2765 dev_err(base->dev,
2766 "[%s] Failed to allocate %d pages.\n",
2767 __func__, base->lcla_pool.pages);
2768
2769 for (j = 0; j < i; j++)
2770 free_pages(page_list[j], base->lcla_pool.pages);
2771 goto failure;
2772 }
2773
2774 if ((virt_to_phys((void *)page_list[i]) &
2775 (LCLA_ALIGNMENT - 1)) == 0)
2776 break;
2777 }
2778
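	/* Free the attempts that were not correctly aligned; the aligned one, if any, is kept */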
2779 for (j = 0; j < i; j++)
2780 free_pages(page_list[j], base->lcla_pool.pages);
2781
2782 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2783 base->lcla_pool.base = (void *)page_list[i];
2784 } else {
Jonas Aaberg767a9672010-08-09 12:08:34 +00002785 /*
2786 * After many attempts with no success finding the correct
2787 * alignment, fall back to allocating a big buffer.
2788 */
Linus Walleij508849a2010-06-20 21:26:07 +00002789 dev_warn(base->dev,
2790 "[%s] Failed to get %d pages @ 18 bit align.\n",
2791 __func__, base->lcla_pool.pages);
2792 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2793 base->num_phy_chans +
2794 LCLA_ALIGNMENT,
2795 GFP_KERNEL);
2796 if (!base->lcla_pool.base_unaligned) {
2797 ret = -ENOMEM;
2798 goto failure;
2799 }
2800
2801 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2802 LCLA_ALIGNMENT);
2803 }
2804
2805 writel(virt_to_phys(base->lcla_pool.base),
2806 base->virtbase + D40_DREG_LCLA);
2807failure:
2808 kfree(page_list);
2809 return ret;
2810}
2811
Linus Walleij8d318a52010-03-30 15:33:42 +02002812static int __init d40_probe(struct platform_device *pdev)
2813{
2814 int err;
2815 int ret = -ENOENT;
2816 struct d40_base *base;
2817 struct resource *res = NULL;
2818 int num_reserved_chans;
2819 u32 val;
2820
2821 base = d40_hw_detect_init(pdev);
2822
2823 if (!base)
2824 goto failure;
2825
2826 num_reserved_chans = d40_phy_res_init(base);
2827
2828 platform_set_drvdata(pdev, base);
2829
2830 spin_lock_init(&base->interrupt_lock);
2831 spin_lock_init(&base->execmd_lock);
2832
2833 /* Get IO for logical channel parameter address */
2834 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2835 if (!res) {
2836 ret = -ENOENT;
2837 dev_err(&pdev->dev,
2838 "[%s] No \"lcpa\" memory resource\n",
2839 __func__);
2840 goto failure;
2841 }
2842 base->lcpa_size = resource_size(res);
2843 base->phy_lcpa = res->start;
2844
2845 if (request_mem_region(res->start, resource_size(res),
2846 D40_NAME " I/O lcpa") == NULL) {
2847 ret = -EBUSY;
2848 dev_err(&pdev->dev,
2849 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2850 __func__, res->start, res->end);
2851 goto failure;
2852 }
2853
2854 /* We make use of ESRAM memory for this. */
2855 val = readl(base->virtbase + D40_DREG_LCPA);
2856 if (res->start != val && val != 0) {
2857 dev_warn(&pdev->dev,
2858 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2859 __func__, val, res->start);
2860 } else
2861 writel(res->start, base->virtbase + D40_DREG_LCPA);
2862
2863 base->lcpa_base = ioremap(res->start, resource_size(res));
2864 if (!base->lcpa_base) {
2865 ret = -ENOMEM;
2866 dev_err(&pdev->dev,
2867 "[%s] Failed to ioremap LCPA region\n",
2868 __func__);
2869 goto failure;
2870 }
Linus Walleij508849a2010-06-20 21:26:07 +00002871
2872 ret = d40_lcla_allocate(base);
2873 if (ret) {
2874 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
Linus Walleij8d318a52010-03-30 15:33:42 +02002875 __func__);
2876 goto failure;
2877 }
2878
Linus Walleij8d318a52010-03-30 15:33:42 +02002879 spin_lock_init(&base->lcla_pool.lock);
2880
2881 base->lcla_pool.num_blocks = base->num_phy_chans;
2882
2883 base->irq = platform_get_irq(pdev, 0);
2884
2885 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2886
2887 if (ret) {
2888 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2889 goto failure;
2890 }
2891
2892 err = d40_dmaengine_init(base, num_reserved_chans);
2893 if (err)
2894 goto failure;
2895
2896 d40_hw_init(base);
2897
2898 dev_info(base->dev, "initialized\n");
2899 return 0;
2900
2901failure:
2902 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002903 if (base->desc_slab)
2904 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02002905 if (base->virtbase)
2906 iounmap(base->virtbase);
Linus Walleij508849a2010-06-20 21:26:07 +00002907 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2908 free_pages((unsigned long)base->lcla_pool.base,
2909 base->lcla_pool.pages);
Jonas Aaberg767a9672010-08-09 12:08:34 +00002910
2911 kfree(base->lcla_pool.base_unaligned);
2912
Linus Walleij8d318a52010-03-30 15:33:42 +02002913 if (base->phy_lcpa)
2914 release_mem_region(base->phy_lcpa,
2915 base->lcpa_size);
2916 if (base->phy_start)
2917 release_mem_region(base->phy_start,
2918 base->phy_size);
2919 if (base->clk) {
2920 clk_disable(base->clk);
2921 clk_put(base->clk);
2922 }
2923
2924 kfree(base->lcla_pool.alloc_map);
2925 kfree(base->lookup_log_chans);
2926 kfree(base->lookup_phy_chans);
2927 kfree(base->phy_res);
2928 kfree(base);
2929 }
2930
2931 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2932 return ret;
2933}
2934
2935static struct platform_driver d40_driver = {
2936 .driver = {
2937 .owner = THIS_MODULE,
2938 .name = D40_NAME,
2939 },
2940};
2941
2942int __init stedma40_init(void)
2943{
2944 return platform_driver_probe(&d40_driver, d40_probe);
2945}
2946arch_initcall(stedma40_init);