/*
 * Copyright (C) ST-Ericsson SA 2007-2010
 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
/* Attempts before giving up on getting pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE (1 << 31)
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0

/* Hardware designer of the block */
#define D40_HW_DESIGNER 0x8

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer; there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @is_hw_linked: true if this job will automatically be continued after
 * the previous one.
 *
 * This descriptor is used for both logical and physical transfers.
 */

struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_count;
	u32 lli_tx_len;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	bool is_in_client_list;
	bool is_hw_linked;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. Aligned to a 2^18 byte
 * (LCLA_ALIGNMENT) boundary.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equal to the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void *base;
	void *base_unaligned;
	int pages;
	spinlock_t lock;
	u32 *alloc_map;
	int num_blocks;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst/src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	/* ID of the most recent completed transfer */
	int completed;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head active;
	struct list_head queue;
	struct stedma40_chan_cfg dma_cfg;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_lcla_elem lcla;
	struct d40_log_lli_full *lcpa;
	/* Runtime reconfiguration */
	dma_addr_t runtime_addr;
	enum dma_data_direction runtime_direction;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's registers.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	u8 rev:4;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_phy_chans;
	int num_log_chans;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	/* Physical half channels */
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equal to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

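/*
 * Allocate space for the LLIs of one descriptor: single-link jobs use the
 * pre-allocated area in the pool, longer jobs get a kmalloc'd buffer padded
 * for alignment. Sets up the src/dst pointers for either log or phy LLIs.
 */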
static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				break;
			}
	} else {
		d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
		if (d != NULL) {
			memset(d, 0, sizeof(struct d40_desc));
			INIT_LIST_HEAD(&d->node);
		}
	}
	return d;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;
	list_for_each_entry(d, &d40c->queue, node)
		if (list_is_last(&d->node, &d40c->queue))
			break;
	return d;
}

/* Support functions for logical channels */

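/*
 * Reserve one src and one dst LCLA entry for this channel in the
 * per-physical-channel allocation bitmap, unless a pair is already held
 * (src_id/dst_id >= 0).
 */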
static int d40_lcla_id_get(struct d40_chan *d40c)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;
	unsigned long flags;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (d40c->base->lcla_pool.num_blocks > 32)
		return -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
		      (0x1 << i))) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
				(0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= d40c->base->lcla_pool.num_blocks)
		goto err;

	for (; i < d40c->base->lcla_pool.num_blocks; i++) {
		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
		      (0x1 << i))) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
				(0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
	return 0;
err:
	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
	return -EINVAL;
}


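/*
 * Write a channel command to the ACTIVE/ACTIVO register and, for suspend
 * requests, poll up to D40_SUSPEND_MAX_IT times for the channel to report
 * STOP or SUSPENDED.
 */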
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	unsigned long flags;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.dst_id));
	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.src_id));

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	/* Notice, that disable requires the physical channel to be stopped */
	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
		     d40c->phy_chan->num * D40_DREG_PCDELTA +
		     D40_CHAN_REG_SDLNK);
	return val;
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDELT);

		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSELT);

	}
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_phy.dst && d40d->lli_phy.src) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
	} else if (d40d->lli_log.dst && d40d->lli_log.src) {
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;
		int s;

		src += d40d->lli_count;
		dst += d40d->lli_count;
		s = d40_log_lli_write(d40c->lcpa,
				      d40c->lcla.src, d40c->lcla.dst,
				      dst, src,
				      d40c->base->plat_data->llis_per_log);

		/* If s equals to zero, the job is not linked */
		if (s > 0) {
			(void) dma_map_single(d40c->base->dev, d40c->lcla.src,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
			(void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
		}
	}
	d40d->lli_count += d40d->lli_tx_len;
}

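/* Number of bytes left to transfer on the current job, read from HW */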
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (d40c->log_num != D40_PHY_CHAN) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);

		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
{
	/* TODO: Write */
}

static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
{
	struct d40_desc *d40d_prev = NULL;
	int i;
	u32 val;

	if (!list_empty(&d40c->queue))
		d40d_prev = d40_last_queued(d40c);
	else if (!list_empty(&d40c->active))
		d40d_prev = d40_first_active_get(d40c);

	if (!d40d_prev)
		return;

	/* Here we try to join this job with previous jobs */
	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	/* Figure out which link we're currently transmitting */
	for (i = 0; i < d40d_prev->lli_len; i++)
		if (val == d40d_prev->lli_phy.src[i].reg_lnk)
			break;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;

	if (i == (d40d_prev->lli_len - 1) && val > 0) {
		/* Change the current one */
		writel(virt_to_phys(d40d->lli_phy.src),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
		writel(virt_to_phys(d40d->lli_phy.dst),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);

		d40d->is_hw_linked = true;

	} else if (i < d40d_prev->lli_len) {
		(void) dma_unmap_single(d40c->base->dev,
					virt_to_phys(d40d_prev->lli_phy.src),
					d40d_prev->lli_pool.size,
					DMA_TO_DEVICE);

		/* Keep the settings */
		val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.src);

		val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.dst);

		(void) dma_map_single(d40c->base->dev,
				      d40d_prev->lli_phy.src,
				      d40d_prev->lli_pool.size,
				      DMA_TO_DEVICE);
		d40d->is_hw_linked = true;
	}
}

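/*
 * dmaengine tx_submit hook: assign a cookie and put the descriptor on the
 * queue, trying to hardware-link it to the previous job while the channel
 * is briefly paused.
 */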
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	(void) d40_pause(&d40c->chan);

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	if (d40c->log_num == D40_PHY_CHAN)
		d40_tx_submit_phy(d40c, d40d);
	else
		d40_tx_submit_log(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	(void) d40_resume(&d40c->chan);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (d40c->log_num != D40_PHY_CHAN) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/*
		 * If this job is already linked in hw,
		 * do not submit it.
		 */
		if (!d40d->is_hw_linked) {
			/* Initiate DMA job */
			d40_desc_load(d40c, d40d);

			/* Start dma job */
			err = d40_start(d40c);

			if (err)
				return NULL;
		}
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_count < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);

}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		goto err;

	d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (async_tx_test_ack(&d40d->txd)) {
		d40_pool_lli_free(d40d);
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	} else {
		if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

 err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
		{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
		{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev,
				"[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

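/* Sanity-check a client channel configuration before it is accepted */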
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (!conf->dir) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid TX channel address (%d)\n",
			__func__, conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid RX channel address (%d)\n",
			__func__, conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}

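/*
 * Try to mark a physical (half) channel as used, either for a whole physical
 * channel or for a single event line of a logical channel. Returns true on
 * success.
 */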
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

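/*
 * Pick a physical channel (or a physical channel plus event line for logical
 * channels) for this client, spreading logical channels across the physical
 * channels of the event group.
 */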
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type &
		       STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;


	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}
	if (dev_type == -1)
		return -EINVAL;

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;

}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}


static int d40_free_dma(struct d40_chan *d40c)
{

	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;


	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
		return res;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Release logical channel, deactivate the event line */

		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
	else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}
	status = d40_chan_has_events(d40c);
	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;

}


static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);

1605struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1606 struct scatterlist *sgl_dst,
1607 struct scatterlist *sgl_src,
1608 unsigned int sgl_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001609 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001610{
1611 int res;
1612 struct d40_desc *d40d;
1613 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1614 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001615 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001616
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001617 if (d40c->phy_chan == NULL) {
1618 dev_err(&d40c->chan.dev->device,
1619 "[%s] Unallocated channel.\n", __func__);
1620 return ERR_PTR(-EINVAL);
1621 }
1622
Jonas Aaberg2a614342010-06-20 21:25:24 +00001623 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001624 d40d = d40_desc_get(d40c);
1625
1626 if (d40d == NULL)
1627 goto err;
1628
Linus Walleij8d318a52010-03-30 15:33:42 +02001629 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00001630 d40d->lli_tx_len = d40d->lli_len;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001631 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001632
1633 if (d40c->log_num != D40_PHY_CHAN) {
Per Friden941b77a2010-06-20 21:24:45 +00001634 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1635 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1636
Linus Walleij8d318a52010-03-30 15:33:42 +02001637 if (sgl_len > 1)
1638 /*
1639 * Check if there is space available in lcla. If not,
1640 * split the list into one-element transfers and run
1641 * only in lcpa space.
1642 */
Linus Walleij508849a2010-06-20 21:26:07 +00001643 if (d40_lcla_id_get(d40c) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001644 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001645
1646 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1647 dev_err(&d40c->chan.dev->device,
1648 "[%s] Out of memory\n", __func__);
1649 goto err;
1650 }
1651
1652 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1653 sgl_src,
1654 sgl_len,
1655 d40d->lli_log.src,
1656 d40c->log_def.lcsp1,
1657 d40c->dma_cfg.src_info.data_width,
Per Friden941b77a2010-06-20 21:24:45 +00001658 d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001659 d40c->base->plat_data->llis_per_log);
1660
1661 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1662 sgl_dst,
1663 sgl_len,
1664 d40d->lli_log.dst,
1665 d40c->log_def.lcsp3,
1666 d40c->dma_cfg.dst_info.data_width,
Per Friden941b77a2010-06-20 21:24:45 +00001667 d40d->lli_tx_len,
Linus Walleij8d318a52010-03-30 15:33:42 +02001668 d40c->base->plat_data->llis_per_log);
1669
1670
1671 } else {
1672 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1673 dev_err(&d40c->chan.dev->device,
1674 "[%s] Out of memory\n", __func__);
1675 goto err;
1676 }
1677
1678 res = d40_phy_sg_to_lli(sgl_src,
1679 sgl_len,
1680 0,
1681 d40d->lli_phy.src,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001682 virt_to_phys(d40d->lli_phy.src),
Linus Walleij8d318a52010-03-30 15:33:42 +02001683 d40c->src_def_cfg,
1684 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00001685 d40c->dma_cfg.src_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02001686
1687 if (res < 0)
1688 goto err;
1689
1690 res = d40_phy_sg_to_lli(sgl_dst,
1691 sgl_len,
1692 0,
1693 d40d->lli_phy.dst,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001694 virt_to_phys(d40d->lli_phy.dst),
Linus Walleij8d318a52010-03-30 15:33:42 +02001695 d40c->dst_def_cfg,
1696 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00001697 d40c->dma_cfg.dst_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02001698
1699 if (res < 0)
1700 goto err;
1701
1702 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1703 d40d->lli_pool.size, DMA_TO_DEVICE);
1704 }
1705
1706 dma_async_tx_descriptor_init(&d40d->txd, chan);
1707
1708 d40d->txd.tx_submit = d40_tx_submit;
1709
Jonas Aaberg2a614342010-06-20 21:25:24 +00001710 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001711
1712 return &d40d->txd;
1713err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001714 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001715 return NULL;
1716}
1717EXPORT_SYMBOL(stedma40_memcpy_sg);
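
/*
 * Illustrative sketch, not part of this driver: a hedged example of how a
 * client might use stedma40_memcpy_sg() to copy between two scatterlists
 * on a channel it already holds. The callback and the error handling are
 * assumptions made for the example.
 *
 *	static void example_sg_copy_done(void *data)
 *	{
 *		// runs from the DMA tasklet when the transfer completes
 *	}
 *
 *	int example_sg_copy(struct dma_chan *chan,
 *			    struct scatterlist *dst_sg,
 *			    struct scatterlist *src_sg,
 *			    unsigned int sg_len)
 *	{
 *		struct dma_async_tx_descriptor *desc;
 *
 *		desc = stedma40_memcpy_sg(chan, dst_sg, src_sg, sg_len,
 *					  DMA_PREP_INTERRUPT);
 *		if (!desc)
 *			return -EBUSY;
 *
 *		desc->callback = example_sg_copy_done;
 *		desc->tx_submit(desc);
 *		dma_async_issue_pending(chan);
 *		return 0;
 *	}
 */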
1718
1719bool stedma40_filter(struct dma_chan *chan, void *data)
1720{
1721 struct stedma40_chan_cfg *info = data;
1722 struct d40_chan *d40c =
1723 container_of(chan, struct d40_chan, chan);
1724 int err;
1725
1726 if (data) {
1727 err = d40_validate_conf(d40c, info);
1728 if (!err)
1729 d40c->dma_cfg = *info;
1730 } else
1731 err = d40_config_memcpy(d40c);
1732
1733 return err == 0;
1734}
1735EXPORT_SYMBOL(stedma40_filter);
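
/*
 * Illustrative sketch, not part of this driver: a hedged example of how a
 * client might request a DMA40 channel through the dmaengine framework
 * using stedma40_filter() as the filter function. The stedma40_chan_cfg
 * contents are assumed to come from the client's platform data.
 *
 *	struct dma_chan *example_request_chan(struct stedma40_chan_cfg *cfg)
 *	{
 *		dma_cap_mask_t mask;
 *
 *		dma_cap_zero(mask);
 *		dma_cap_set(DMA_SLAVE, mask);
 *
 *		// stedma40_filter() validates cfg against each candidate
 *		// channel and stores it in the channel on success.
 *		return dma_request_channel(mask, stedma40_filter, cfg);
 *	}
 */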
1736
1737/* DMA ENGINE functions */
1738static int d40_alloc_chan_resources(struct dma_chan *chan)
1739{
1740 int err;
1741 unsigned long flags;
1742 struct d40_chan *d40c =
1743 container_of(chan, struct d40_chan, chan);
Linus Walleijef1872e2010-06-20 21:24:52 +00001744 bool is_free_phy;
Linus Walleij8d318a52010-03-30 15:33:42 +02001745 spin_lock_irqsave(&d40c->lock, flags);
1746
1747 d40c->completed = chan->cookie = 1;
1748
1749 /*
1750 * If no dma configuration is set (channel_type == 0)
Linus Walleijef1872e2010-06-20 21:24:52 +00001751 * use default configuration (memcpy)
Linus Walleij8d318a52010-03-30 15:33:42 +02001752 */
1753 if (d40c->dma_cfg.channel_type == 0) {
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001754
Linus Walleij8d318a52010-03-30 15:33:42 +02001755 err = d40_config_memcpy(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001756 if (err) {
1757 dev_err(&d40c->chan.dev->device,
1758 "[%s] Failed to configure memcpy channel\n",
1759 __func__);
1760 goto fail;
1761 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001762 }
Linus Walleijef1872e2010-06-20 21:24:52 +00001763 is_free_phy = (d40c->phy_chan == NULL);
Linus Walleij8d318a52010-03-30 15:33:42 +02001764
1765 err = d40_allocate_channel(d40c);
1766 if (err) {
1767 dev_err(&d40c->chan.dev->device,
1768 "[%s] Failed to allocate channel\n", __func__);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001769 goto fail;
Linus Walleij8d318a52010-03-30 15:33:42 +02001770 }
1771
Linus Walleijef1872e2010-06-20 21:24:52 +00001772 /* Fill in basic CFG register values */
1773 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1774 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1775
1776 if (d40c->log_num != D40_PHY_CHAN) {
1777 d40_log_cfg(&d40c->dma_cfg,
1778 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1779
1780 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1781 d40c->lcpa = d40c->base->lcpa_base +
1782 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1783 else
1784 d40c->lcpa = d40c->base->lcpa_base +
1785 d40c->dma_cfg.dst_dev_type *
1786 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1787 }
1788
1789 /*
1790 * Only write channel configuration to the DMA if the physical
1791 * resource is free. In case of multiple logical channels
1792 * on the same physical resource, only the first write is necessary.
1793 */
Jonas Aabergb55912c2010-08-09 12:08:02 +00001794 if (is_free_phy)
1795 d40_config_write(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001796fail:
Linus Walleij8d318a52010-03-30 15:33:42 +02001797 spin_unlock_irqrestore(&d40c->lock, flags);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00001798 return err;
Linus Walleij8d318a52010-03-30 15:33:42 +02001799}
1800
1801static void d40_free_chan_resources(struct dma_chan *chan)
1802{
1803 struct d40_chan *d40c =
1804 container_of(chan, struct d40_chan, chan);
1805 int err;
1806 unsigned long flags;
1807
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001808 if (d40c->phy_chan == NULL) {
1809 dev_err(&d40c->chan.dev->device,
1810 "[%s] Cannot free unallocated channel\n", __func__);
1811 return;
1812 }
1813
Linus Walleij8d318a52010-03-30 15:33:42 +02001815 spin_lock_irqsave(&d40c->lock, flags);
1816
1817 err = d40_free_dma(d40c);
1818
1819 if (err)
1820 dev_err(&d40c->chan.dev->device,
1821 "[%s] Failed to free channel\n", __func__);
1822 spin_unlock_irqrestore(&d40c->lock, flags);
1823}
1824
1825static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1826 dma_addr_t dst,
1827 dma_addr_t src,
1828 size_t size,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001829 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001830{
1831 struct d40_desc *d40d;
1832 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1833 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00001834 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001835 int err = 0;
1836
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00001837 if (d40c->phy_chan == NULL) {
1838 dev_err(&d40c->chan.dev->device,
1839 "[%s] Channel is not allocated.\n", __func__);
1840 return ERR_PTR(-EINVAL);
1841 }
1842
Jonas Aaberg2a614342010-06-20 21:25:24 +00001843 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001844 d40d = d40_desc_get(d40c);
1845
1846 if (d40d == NULL) {
1847 dev_err(&d40c->chan.dev->device,
1848 "[%s] Descriptor is NULL\n", __func__);
1849 goto err;
1850 }
1851
Jonas Aaberg2a614342010-06-20 21:25:24 +00001852 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02001853
1854 dma_async_tx_descriptor_init(&d40d->txd, chan);
1855
1856 d40d->txd.tx_submit = d40_tx_submit;
1857
1858 if (d40c->log_num != D40_PHY_CHAN) {
1859
1860 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1861 dev_err(&d40c->chan.dev->device,
1862 "[%s] Out of memory\n", __func__);
1863 goto err;
1864 }
1865 d40d->lli_len = 1;
Per Friden941b77a2010-06-20 21:24:45 +00001866 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001867
1868 d40_log_fill_lli(d40d->lli_log.src,
1869 src,
1870 size,
1871 0,
1872 d40c->log_def.lcsp1,
1873 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg2123a612010-06-20 21:25:54 +00001874 false, true);
Linus Walleij8d318a52010-03-30 15:33:42 +02001875
1876 d40_log_fill_lli(d40d->lli_log.dst,
1877 dst,
1878 size,
1879 0,
1880 d40c->log_def.lcsp3,
1881 d40c->dma_cfg.dst_info.data_width,
1882 true, true);
1883
1884 } else {
1885
1886 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1887 dev_err(&d40c->chan.dev->device,
1888 "[%s] Out of memory\n", __func__);
1889 goto err;
1890 }
1891
1892 err = d40_phy_fill_lli(d40d->lli_phy.src,
1893 src,
1894 size,
1895 d40c->dma_cfg.src_info.psize,
1896 0,
1897 d40c->src_def_cfg,
1898 true,
1899 d40c->dma_cfg.src_info.data_width,
1900 false);
1901 if (err)
1902 goto err_fill_lli;
1903
1904 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1905 dst,
1906 size,
1907 d40c->dma_cfg.dst_info.psize,
1908 0,
1909 d40c->dst_def_cfg,
1910 true,
1911 d40c->dma_cfg.dst_info.data_width,
1912 false);
1913
1914 if (err)
1915 goto err_fill_lli;
1916
1917 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1918 d40d->lli_pool.size, DMA_TO_DEVICE);
1919 }
1920
Jonas Aaberg2a614342010-06-20 21:25:24 +00001921 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001922 return &d40d->txd;
1923
1924err_fill_lli:
1925 dev_err(&d40c->chan.dev->device,
1926 "[%s] Failed filling in PHY LLI\n", __func__);
1927 d40_pool_lli_free(d40d);
1928err:
Jonas Aaberg2a614342010-06-20 21:25:24 +00001929 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02001930 return NULL;
1931}
1932
1933static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1934 struct d40_chan *d40c,
1935 struct scatterlist *sgl,
1936 unsigned int sg_len,
1937 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001938 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02001939{
1940 dma_addr_t dev_addr = 0;
1941 int total_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001942
1943 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1944 dev_err(&d40c->chan.dev->device,
1945 "[%s] Out of memory\n", __func__);
1946 return -ENOMEM;
1947 }
1948
1949 d40d->lli_len = sg_len;
Per Friden941b77a2010-06-20 21:24:45 +00001950 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1951 d40d->lli_tx_len = d40d->lli_len;
1952 else
1953 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
Linus Walleij8d318a52010-03-30 15:33:42 +02001954
1955 if (sg_len > 1)
1956 /*
1957 * Check if there is space available in lcla.
1958 * If not, split the list into one-element transfers
1959 * and run only in lcpa space.
1960 */
Linus Walleij508849a2010-06-20 21:26:07 +00001961 if (d40_lcla_id_get(d40c) != 0)
Per Friden941b77a2010-06-20 21:24:45 +00001962 d40d->lli_tx_len = 1;
Linus Walleij8d318a52010-03-30 15:33:42 +02001963
Jonas Aaberg2a614342010-06-20 21:25:24 +00001964 if (direction == DMA_FROM_DEVICE)
Linus Walleij95e14002010-08-04 13:37:45 +02001965 if (d40c->runtime_addr)
1966 dev_addr = d40c->runtime_addr;
1967 else
1968 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Jonas Aaberg2a614342010-06-20 21:25:24 +00001969 else if (direction == DMA_TO_DEVICE)
Linus Walleij95e14002010-08-04 13:37:45 +02001970 if (d40c->runtime_addr)
1971 dev_addr = d40c->runtime_addr;
1972 else
1973 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1974
Jonas Aaberg2a614342010-06-20 21:25:24 +00001975 else
Linus Walleij8d318a52010-03-30 15:33:42 +02001976 return -EINVAL;
Jonas Aaberg2a614342010-06-20 21:25:24 +00001977
1978 total_size = d40_log_sg_to_dev(&d40c->lcla,
1979 sgl, sg_len,
1980 &d40d->lli_log,
1981 &d40c->log_def,
1982 d40c->dma_cfg.src_info.data_width,
1983 d40c->dma_cfg.dst_info.data_width,
1984 direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001985 dev_addr, d40d->lli_tx_len,
1986 d40c->base->plat_data->llis_per_log);
1987
Linus Walleij8d318a52010-03-30 15:33:42 +02001988 if (total_size < 0)
1989 return -EINVAL;
1990
1991 return 0;
1992}
1993
1994static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1995 struct d40_chan *d40c,
1996 struct scatterlist *sgl,
1997 unsigned int sgl_len,
1998 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00001999 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02002000{
2001 dma_addr_t src_dev_addr;
2002 dma_addr_t dst_dev_addr;
2003 int res;
2004
2005 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
2006 dev_err(&d40c->chan.dev->device,
2007 "[%s] Out of memory\n", __func__);
2008 return -ENOMEM;
2009 }
2010
2011 d40d->lli_len = sgl_len;
Per Friden941b77a2010-06-20 21:24:45 +00002012 d40d->lli_tx_len = sgl_len;
Linus Walleij8d318a52010-03-30 15:33:42 +02002013
2014 if (direction == DMA_FROM_DEVICE) {
2015 dst_dev_addr = 0;
Linus Walleij95e14002010-08-04 13:37:45 +02002016 if (d40c->runtime_addr)
2017 src_dev_addr = d40c->runtime_addr;
2018 else
2019 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
Linus Walleij8d318a52010-03-30 15:33:42 +02002020 } else if (direction == DMA_TO_DEVICE) {
Linus Walleij95e14002010-08-04 13:37:45 +02002021 if (d40c->runtime_addr)
2022 dst_dev_addr = d40c->runtime_addr;
2023 else
2024 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
Linus Walleij8d318a52010-03-30 15:33:42 +02002025 src_dev_addr = 0;
2026 } else
2027 return -EINVAL;
2028
2029 res = d40_phy_sg_to_lli(sgl,
2030 sgl_len,
2031 src_dev_addr,
2032 d40d->lli_phy.src,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00002033 virt_to_phys(d40d->lli_phy.src),
Linus Walleij8d318a52010-03-30 15:33:42 +02002034 d40c->src_def_cfg,
2035 d40c->dma_cfg.src_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00002036 d40c->dma_cfg.src_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02002037 if (res < 0)
2038 return res;
2039
2040 res = d40_phy_sg_to_lli(sgl,
2041 sgl_len,
2042 dst_dev_addr,
2043 d40d->lli_phy.dst,
Jonas Aabergaa182ae2010-08-09 12:08:26 +00002044 virt_to_phys(d40d->lli_phy.dst),
Linus Walleij8d318a52010-03-30 15:33:42 +02002045 d40c->dst_def_cfg,
2046 d40c->dma_cfg.dst_info.data_width,
Jonas Aaberg0246e772010-08-09 12:08:10 +00002047 d40c->dma_cfg.dst_info.psize);
Linus Walleij8d318a52010-03-30 15:33:42 +02002048 if (res < 0)
2049 return res;
2050
2051 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2052 d40d->lli_pool.size, DMA_TO_DEVICE);
2053 return 0;
2054}
2055
2056static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2057 struct scatterlist *sgl,
2058 unsigned int sg_len,
2059 enum dma_data_direction direction,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002060 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02002061{
2062 struct d40_desc *d40d;
2063 struct d40_chan *d40c = container_of(chan, struct d40_chan,
2064 chan);
Jonas Aaberg2a614342010-06-20 21:25:24 +00002065 unsigned long flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02002066 int err;
2067
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002068 if (d40c->phy_chan == NULL) {
2069 dev_err(&d40c->chan.dev->device,
2070 "[%s] Cannot prepare unallocated channel\n", __func__);
2071 return ERR_PTR(-EINVAL);
2072 }
2073
Linus Walleij8d318a52010-03-30 15:33:42 +02002074 if (d40c->dma_cfg.pre_transfer)
2075 d40c->dma_cfg.pre_transfer(chan,
2076 d40c->dma_cfg.pre_transfer_data,
2077 sg_dma_len(sgl));
2078
Jonas Aaberg2a614342010-06-20 21:25:24 +00002079 spin_lock_irqsave(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002080 d40d = d40_desc_get(d40c);
Jonas Aaberg2a614342010-06-20 21:25:24 +00002081 spin_unlock_irqrestore(&d40c->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002082
2083 if (d40d == NULL)
2084 return NULL;
2085
Linus Walleij8d318a52010-03-30 15:33:42 +02002086 if (d40c->log_num != D40_PHY_CHAN)
2087 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002088 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002089 else
2090 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002091 direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002092 if (err) {
2093 dev_err(&d40c->chan.dev->device,
2094 "[%s] Failed to prepare %s slave sg job: %d\n",
2095 __func__,
2096 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2097 return NULL;
2098 }
2099
Jonas Aaberg2a614342010-06-20 21:25:24 +00002100 d40d->txd.flags = dma_flags;
Linus Walleij8d318a52010-03-30 15:33:42 +02002101
2102 dma_async_tx_descriptor_init(&d40d->txd, chan);
2103
2104 d40d->txd.tx_submit = d40_tx_submit;
2105
2106 return &d40d->txd;
2107}
2108
2109static enum dma_status d40_tx_status(struct dma_chan *chan,
2110 dma_cookie_t cookie,
2111 struct dma_tx_state *txstate)
2112{
2113 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2114 dma_cookie_t last_used;
2115 dma_cookie_t last_complete;
2116 int ret;
2117
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002118 if (d40c->phy_chan == NULL) {
2119 dev_err(&d40c->chan.dev->device,
2120 "[%s] Cannot read status of unallocated channel\n",
2121 __func__);
2122 return -EINVAL;
2123 }
2124
Linus Walleij8d318a52010-03-30 15:33:42 +02002125 last_complete = d40c->completed;
2126 last_used = chan->cookie;
2127
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002128 if (d40_is_paused(d40c))
2129 ret = DMA_PAUSED;
2130 else
2131 ret = dma_async_is_complete(cookie, last_complete, last_used);
Linus Walleij8d318a52010-03-30 15:33:42 +02002132
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002133 dma_set_tx_state(txstate, last_complete, last_used,
2134 stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002135
2136 return ret;
2137}
2138
2139static void d40_issue_pending(struct dma_chan *chan)
2140{
2141 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2142 unsigned long flags;
2143
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002144 if (d40c->phy_chan == NULL) {
2145 dev_err(&d40c->chan.dev->device,
2146 "[%s] Channel is not allocated!\n", __func__);
2147 return;
2148 }
2149
Linus Walleij8d318a52010-03-30 15:33:42 +02002150 spin_lock_irqsave(&d40c->lock, flags);
2151
2152 /* Busy means that pending jobs are already being processed */
2153 if (!d40c->busy)
2154 (void) d40_queue_start(d40c);
2155
2156 spin_unlock_irqrestore(&d40c->lock, flags);
2157}
2158
Linus Walleij95e14002010-08-04 13:37:45 +02002159/* Runtime reconfiguration extension */
2160static void d40_set_runtime_config(struct dma_chan *chan,
2161 struct dma_slave_config *config)
2162{
2163 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2164 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
2165 enum dma_slave_buswidth config_addr_width;
2166 dma_addr_t config_addr;
2167 u32 config_maxburst;
2168 enum stedma40_periph_data_width addr_width;
2169 int psize;
2170
2171 if (config->direction == DMA_FROM_DEVICE) {
2172 dma_addr_t dev_addr_rx =
2173 d40c->base->plat_data->dev_rx[cfg->src_dev_type];
2174
2175 config_addr = config->src_addr;
2176 if (dev_addr_rx)
2177 dev_dbg(d40c->base->dev,
2178 "channel has a pre-wired RX address %08x "
2179 "overriding with %08x\n",
2180 dev_addr_rx, config_addr);
2181 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2182 dev_dbg(d40c->base->dev,
2183 "channel was not configured for peripheral "
2184 "to memory transfer (%d) overriding\n",
2185 cfg->dir);
2186 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2187
2188 config_addr_width = config->src_addr_width;
2189 config_maxburst = config->src_maxburst;
2190
2191 } else if (config->direction == DMA_TO_DEVICE) {
2192 dma_addr_t dev_addr_tx =
2193 d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
2194
2195 config_addr = config->dst_addr;
2196 if (dev_addr_tx)
2197 dev_dbg(d40c->base->dev,
2198 "channel has a pre-wired TX address %08x "
2199 "overriding with %08x\n",
2200 dev_addr_tx, config_addr);
2201 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2202 dev_dbg(d40c->base->dev,
2203 "channel was not configured for memory "
2204 "to peripheral transfer (%d) overriding\n",
2205 cfg->dir);
2206 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2207
2208 config_addr_width = config->dst_addr_width;
2209 config_maxburst = config->dst_maxburst;
2210
2211 } else {
2212 dev_err(d40c->base->dev,
2213 "unrecognized channel direction %d\n",
2214 config->direction);
2215 return;
2216 }
2217
2218 switch (config_addr_width) {
2219 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2220 addr_width = STEDMA40_BYTE_WIDTH;
2221 break;
2222 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2223 addr_width = STEDMA40_HALFWORD_WIDTH;
2224 break;
2225 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2226 addr_width = STEDMA40_WORD_WIDTH;
2227 break;
2228 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2229 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2230 break;
2231 default:
2232 dev_err(d40c->base->dev,
2233 "illegal peripheral address width "
2234 "requested (%d)\n",
2235 config_addr_width);
2236 return;
2237 }
2238
2239 if (config_maxburst >= 16)
2240 psize = STEDMA40_PSIZE_LOG_16;
2241 else if (config_maxburst >= 8)
2242 psize = STEDMA40_PSIZE_LOG_8;
2243 else if (config_maxburst >= 4)
2244 psize = STEDMA40_PSIZE_LOG_4;
2245 else
2246 psize = STEDMA40_PSIZE_LOG_1;
2247
2248 /* Set up all the endpoint configs */
2249 cfg->src_info.data_width = addr_width;
2250 cfg->src_info.psize = psize;
2251 cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
2252 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2253 cfg->dst_info.data_width = addr_width;
2254 cfg->dst_info.psize = psize;
2255 cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
2256 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2257
2258 /* These settings will take precedence later */
2259 d40c->runtime_addr = config_addr;
2260 d40c->runtime_direction = config->direction;
2261 dev_dbg(d40c->base->dev,
2262 "configured channel %s for %s, data width %d, "
2263 "maxburst %d bytes, LE, no flow control\n",
2264 dma_chan_name(chan),
2265 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
2266 config_addr_width,
2267 config_maxburst);
2268}
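
/*
 * Illustrative sketch, not part of this driver: a hedged example of how a
 * slave driver might hand a dma_slave_config to this channel; it reaches
 * d40_set_runtime_config() via the DMA_SLAVE_CONFIG control command. The
 * FIFO address, bus width and burst length are assumptions.
 *
 *	int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
 *	{
 *		struct dma_slave_config cfg = {
 *			.direction	= DMA_FROM_DEVICE,
 *			.src_addr	= fifo_addr,
 *			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *			.src_maxburst	= 8,
 *		};
 *
 *		return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *						    (unsigned long) &cfg);
 *	}
 */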
2269
Linus Walleij05827632010-05-17 16:30:42 -07002270static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2271 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002272{
2273 unsigned long flags;
2274 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2275
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002276 if (d40c->phy_chan == NULL) {
2277 dev_err(&d40c->chan.dev->device,
2278 "[%s] Channel is not allocated!\n", __func__);
2279 return -EINVAL;
2280 }
2281
Linus Walleij8d318a52010-03-30 15:33:42 +02002282 switch (cmd) {
2283 case DMA_TERMINATE_ALL:
2284 spin_lock_irqsave(&d40c->lock, flags);
2285 d40_term_all(d40c);
2286 spin_unlock_irqrestore(&d40c->lock, flags);
2287 return 0;
2288 case DMA_PAUSE:
2289 return d40_pause(chan);
2290 case DMA_RESUME:
2291 return d40_resume(chan);
Linus Walleij95e14002010-08-04 13:37:45 +02002292 case DMA_SLAVE_CONFIG:
2293 d40_set_runtime_config(chan,
2294 (struct dma_slave_config *) arg);
2295 return 0;
2296 default:
2297 break;
Linus Walleij8d318a52010-03-30 15:33:42 +02002298 }
2299
2300 /* Other commands are unimplemented */
2301 return -ENXIO;
2302}
2303
2304/* Initialization functions */
2305
2306static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2307 struct d40_chan *chans, int offset,
2308 int num_chans)
2309{
2310 int i = 0;
2311 struct d40_chan *d40c;
2312
2313 INIT_LIST_HEAD(&dma->channels);
2314
2315 for (i = offset; i < offset + num_chans; i++) {
2316 d40c = &chans[i];
2317 d40c->base = base;
2318 d40c->chan.device = dma;
2319
2320 /* Invalidate lcla element */
2321 d40c->lcla.src_id = -1;
2322 d40c->lcla.dst_id = -1;
2323
2324 spin_lock_init(&d40c->lock);
2325
2326 d40c->log_num = D40_PHY_CHAN;
2327
Linus Walleij8d318a52010-03-30 15:33:42 +02002328 INIT_LIST_HEAD(&d40c->active);
2329 INIT_LIST_HEAD(&d40c->queue);
2330 INIT_LIST_HEAD(&d40c->client);
2331
Linus Walleij8d318a52010-03-30 15:33:42 +02002332 tasklet_init(&d40c->tasklet, dma_tasklet,
2333 (unsigned long) d40c);
2334
2335 list_add_tail(&d40c->chan.device_node,
2336 &dma->channels);
2337 }
2338}
2339
2340static int __init d40_dmaengine_init(struct d40_base *base,
2341 int num_reserved_chans)
2342{
2343 int err;
2344
2345 d40_chan_init(base, &base->dma_slave, base->log_chans,
2346 0, base->num_log_chans);
2347
2348 dma_cap_zero(base->dma_slave.cap_mask);
2349 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2350
2351 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2352 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2353 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2354 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2355 base->dma_slave.device_tx_status = d40_tx_status;
2356 base->dma_slave.device_issue_pending = d40_issue_pending;
2357 base->dma_slave.device_control = d40_control;
2358 base->dma_slave.dev = base->dev;
2359
2360 err = dma_async_device_register(&base->dma_slave);
2361
2362 if (err) {
2363 dev_err(base->dev,
2364 "[%s] Failed to register slave channels\n",
2365 __func__);
2366 goto failure1;
2367 }
2368
2369 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2370 base->num_log_chans, base->plat_data->memcpy_len);
2371
2372 dma_cap_zero(base->dma_memcpy.cap_mask);
2373 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2374
2375 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2376 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2377 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2378 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2379 base->dma_memcpy.device_tx_status = d40_tx_status;
2380 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2381 base->dma_memcpy.device_control = d40_control;
2382 base->dma_memcpy.dev = base->dev;
2383 /*
2384 * This controller can only access addresses at even
2385 * 32-bit boundaries, i.e. 2^2
2386 */
2387 base->dma_memcpy.copy_align = 2;
2388
2389 err = dma_async_device_register(&base->dma_memcpy);
2390
2391 if (err) {
2392 dev_err(base->dev,
2393 "[%s] Failed to regsiter memcpy only channels\n",
2394 __func__);
2395 goto failure2;
2396 }
2397
2398 d40_chan_init(base, &base->dma_both, base->phy_chans,
2399 0, num_reserved_chans);
2400
2401 dma_cap_zero(base->dma_both.cap_mask);
2402 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2403 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2404
2405 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2406 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2407 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2408 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2409 base->dma_both.device_tx_status = d40_tx_status;
2410 base->dma_both.device_issue_pending = d40_issue_pending;
2411 base->dma_both.device_control = d40_control;
2412 base->dma_both.dev = base->dev;
2413 base->dma_both.copy_align = 2;
2414 err = dma_async_device_register(&base->dma_both);
2415
2416 if (err) {
2417 dev_err(base->dev,
2418 "[%s] Failed to register logical and physical capable channels\n",
2419 __func__);
2420 goto failure3;
2421 }
2422 return 0;
2423failure3:
2424 dma_async_device_unregister(&base->dma_memcpy);
2425failure2:
2426 dma_async_device_unregister(&base->dma_slave);
2427failure1:
2428 return err;
2429}
2430
2431/* Initialization functions. */
2432
2433static int __init d40_phy_res_init(struct d40_base *base)
2434{
2435 int i;
2436 int num_phy_chans_avail = 0;
2437 u32 val[2];
2438 int odd_even_bit = -2;
2439
2440 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2441 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2442
2443 for (i = 0; i < base->num_phy_chans; i++) {
2444 base->phy_res[i].num = i;
2445 odd_even_bit += 2 * ((i % 2) == 0);
2446 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2447 /* Mark security-only channels as occupied */
2448 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2449 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2450 } else {
2451 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2452 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2453 num_phy_chans_avail++;
2454 }
2455 spin_lock_init(&base->phy_res[i].lock);
2456 }
Jonas Aaberg6b7acd82010-06-20 21:26:59 +00002457
2458 /* Mark disabled channels as occupied */
2459 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
2460 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2461 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2462 num_phy_chans_avail--;
2463 }
2464
Linus Walleij8d318a52010-03-30 15:33:42 +02002465 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2466 num_phy_chans_avail, base->num_phy_chans);
2467
2468 /* Verify settings extended vs standard */
2469 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2470
2471 for (i = 0; i < base->num_phy_chans; i++) {
2472
2473 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2474 (val[0] & 0x3) != 1)
2475 dev_info(base->dev,
2476 "[%s] INFO: channel %d is misconfigured (%d)\n",
2477 __func__, i, val[0] & 0x3);
2478
2479 val[0] = val[0] >> 2;
2480 }
2481
2482 return num_phy_chans_avail;
2483}
2484
2485static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2486{
2487 static const struct d40_reg_val dma_id_regs[] = {
2488 /* Peripheral Id */
2489 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2490 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2491 /*
2492 * D40_DREG_PERIPHID2 Depends on HW revision:
2493 * MOP500/HREF ED has 0x0008,
2494 * ? has 0x0018,
2495 * HREF V1 has 0x0028
2496 */
2497 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2498
2499 /* PCell Id */
2500 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2501 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2502 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2503 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2504 };
2505 struct stedma40_platform_data *plat_data;
2506 struct clk *clk = NULL;
2507 void __iomem *virtbase = NULL;
2508 struct resource *res = NULL;
2509 struct d40_base *base = NULL;
2510 int num_log_chans = 0;
2511 int num_phy_chans;
2512 int i;
Linus Walleijf4185592010-06-22 18:06:42 -07002513 u32 val;
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002514 u32 rev;
Linus Walleij8d318a52010-03-30 15:33:42 +02002515
2516 clk = clk_get(&pdev->dev, NULL);
2517
2518 if (IS_ERR(clk)) {
2519 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2520 __func__);
2521 goto failure;
2522 }
2523
2524 clk_enable(clk);
2525
2526 /* Get IO for DMAC base address */
2527 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2528 if (!res)
2529 goto failure;
2530
2531 if (request_mem_region(res->start, resource_size(res),
2532 D40_NAME " I/O base") == NULL)
2533 goto failure;
2534
2535 virtbase = ioremap(res->start, resource_size(res));
2536 if (!virtbase)
2537 goto failure;
2538
2539 /* HW version check */
2540 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2541 if (dma_id_regs[i].val !=
2542 readl(virtbase + dma_id_regs[i].reg)) {
2543 dev_err(&pdev->dev,
2544 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2545 __func__,
2546 dma_id_regs[i].val,
2547 dma_id_regs[i].reg,
2548 readl(virtbase + dma_id_regs[i].reg));
2549 goto failure;
2550 }
2551 }
2552
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002553 /* Get silicon revision and designer */
Linus Walleijf4185592010-06-22 18:06:42 -07002554 val = readl(virtbase + D40_DREG_PERIPHID2);
Linus Walleij8d318a52010-03-30 15:33:42 +02002555
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002556 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
2557 D40_HW_DESIGNER) {
Linus Walleij8d318a52010-03-30 15:33:42 +02002558 dev_err(&pdev->dev,
2559 "[%s] Unknown designer! Got %x wanted %x\n",
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002560 __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2561 D40_HW_DESIGNER);
Linus Walleij8d318a52010-03-30 15:33:42 +02002562 goto failure;
2563 }
2564
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002565 rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
2566 D40_DREG_PERIPHID2_REV_POS;
2567
Linus Walleij8d318a52010-03-30 15:33:42 +02002568 /* The number of physical channels on this HW */
2569 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2570
2571 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002572 rev, res->start);
Linus Walleij8d318a52010-03-30 15:33:42 +02002573
2574 plat_data = pdev->dev.platform_data;
2575
2576 /* Count the number of logical channels in use */
2577 for (i = 0; i < plat_data->dev_len; i++)
2578 if (plat_data->dev_rx[i] != 0)
2579 num_log_chans++;
2580
2581 for (i = 0; i < plat_data->dev_len; i++)
2582 if (plat_data->dev_tx[i] != 0)
2583 num_log_chans++;
2584
2585 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2586 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2587 sizeof(struct d40_chan), GFP_KERNEL);
2588
2589 if (base == NULL) {
2590 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2591 goto failure;
2592 }
2593
Jonas Aaberg3ae02672010-08-09 12:08:18 +00002594 base->rev = rev;
Linus Walleij8d318a52010-03-30 15:33:42 +02002595 base->clk = clk;
2596 base->num_phy_chans = num_phy_chans;
2597 base->num_log_chans = num_log_chans;
2598 base->phy_start = res->start;
2599 base->phy_size = resource_size(res);
2600 base->virtbase = virtbase;
2601 base->plat_data = plat_data;
2602 base->dev = &pdev->dev;
2603 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2604 base->log_chans = &base->phy_chans[num_phy_chans];
2605
2606 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2607 GFP_KERNEL);
2608 if (!base->phy_res)
2609 goto failure;
2610
2611 base->lookup_phy_chans = kzalloc(num_phy_chans *
2612 sizeof(struct d40_chan *),
2613 GFP_KERNEL);
2614 if (!base->lookup_phy_chans)
2615 goto failure;
2616
2617 if (num_log_chans + plat_data->memcpy_len) {
2618 /*
2619 * The max number of logical channels is the number of event
2620 * lines for all src and dst devices
2621 */
2622 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2623 sizeof(struct d40_chan *),
2624 GFP_KERNEL);
2625 if (!base->lookup_log_chans)
2626 goto failure;
2627 }
2628 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2629 GFP_KERNEL);
2630 if (!base->lcla_pool.alloc_map)
2631 goto failure;
2632
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002633 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2634 0, SLAB_HWCACHE_ALIGN,
2635 NULL);
2636 if (base->desc_slab == NULL)
2637 goto failure;
2638
Linus Walleij8d318a52010-03-30 15:33:42 +02002639 return base;
2640
2641failure:
2642 if (clk) {
2643 clk_disable(clk);
2644 clk_put(clk);
2645 }
2646 if (virtbase)
2647 iounmap(virtbase);
2648 if (res)
2649 release_mem_region(res->start,
2650 resource_size(res));
2651 if (virtbase)
2652 iounmap(virtbase);
2653
2654 if (base) {
2655 kfree(base->lcla_pool.alloc_map);
2656 kfree(base->lookup_log_chans);
2657 kfree(base->lookup_phy_chans);
2658 kfree(base->phy_res);
2659 kfree(base);
2660 }
2661
2662 return NULL;
2663}
2664
2665static void __init d40_hw_init(struct d40_base *base)
2666{
2667
2668 static const struct d40_reg_val dma_init_reg[] = {
2669 /* Clock every part of the DMA block from start */
2670 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2671
2672 /* Interrupts on all logical channels */
2673 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2674 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2675 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2676 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2677 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2678 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2679 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2680 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2681 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2682 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2683 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2684 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2685 };
2686 int i;
2687 u32 prmseo[2] = {0, 0};
2688 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2689 u32 pcmis = 0;
2690 u32 pcicr = 0;
2691
2692 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2693 writel(dma_init_reg[i].val,
2694 base->virtbase + dma_init_reg[i].reg);
2695
2696 /* Configure all our dma channels to default settings */
2697 for (i = 0; i < base->num_phy_chans; i++) {
2698
2699 activeo[i % 2] = activeo[i % 2] << 2;
2700
2701 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2702 == D40_ALLOC_PHY) {
2703 activeo[i % 2] |= 3;
2704 continue;
2705 }
2706
2707 /* Enable interrupt # */
2708 pcmis = (pcmis << 1) | 1;
2709
2710 /* Clear interrupt # */
2711 pcicr = (pcicr << 1) | 1;
2712
2713 /* Set channel to physical mode */
2714 prmseo[i % 2] = prmseo[i % 2] << 2;
2715 prmseo[i % 2] |= 1;
2716
2717 }
2718
2719 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2720 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2721 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2722 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2723
2724 /* Write which interrupt to enable */
2725 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2726
2727 /* Write which interrupt to clear */
2728 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2729
2730}
2731
Linus Walleij508849a2010-06-20 21:26:07 +00002732static int __init d40_lcla_allocate(struct d40_base *base)
2733{
2734 unsigned long *page_list;
2735 int i, j;
2736 int ret = 0;
2737
2738 /*
2739 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
2740 * To fulfill this hardware requirement without wasting 256 KiB,
2741 * we allocate pages until we get an aligned one.
2742 */
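	/*
	 * Worked numbers (illustrative): LCLA_ALIGNMENT is 0x40000, i.e.
	 * 2^18 = 256 KiB, while the pool itself is only SZ_1K *
	 * num_phy_chans bytes (8 KiB with, for instance, 8 physical
	 * channels). Allocating a naturally aligned 256 KiB block would
	 * therefore waste most of it; instead, normal page allocations are
	 * retried until one happens to be suitably aligned.
	 */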
2743 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
2744 GFP_KERNEL);
2745
2746 if (!page_list) {
2747 ret = -ENOMEM;
2748 goto failure;
2749 }
2750
2751 /* Calculate how many pages are required */
2752 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
2753
2754 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
2755 page_list[i] = __get_free_pages(GFP_KERNEL,
2756 base->lcla_pool.pages);
2757 if (!page_list[i]) {
2758
2759 dev_err(base->dev,
2760 "[%s] Failed to allocate %d pages.\n",
2761 __func__, base->lcla_pool.pages);
2762
2763 for (j = 0; j < i; j++)
2764 free_pages(page_list[j], base->lcla_pool.pages);
2765 goto failure;
2766 }
2767
2768 if ((virt_to_phys((void *)page_list[i]) &
2769 (LCLA_ALIGNMENT - 1)) == 0)
2770 break;
2771 }
2772
2773 for (j = 0; j < i; j++)
2774 free_pages(page_list[j], base->lcla_pool.pages);
2775
2776 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
2777 base->lcla_pool.base = (void *)page_list[i];
2778 } else {
Jonas Aaberg767a9672010-08-09 12:08:34 +00002779 /*
2780 * After many attempts with no success in finding the correct
2781 * alignment, fall back to allocating a big buffer.
2782 */
Linus Walleij508849a2010-06-20 21:26:07 +00002783 dev_warn(base->dev,
2784 "[%s] Failed to get %d pages @ 18 bit align.\n",
2785 __func__, base->lcla_pool.pages);
2786 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
2787 base->num_phy_chans +
2788 LCLA_ALIGNMENT,
2789 GFP_KERNEL);
2790 if (!base->lcla_pool.base_unaligned) {
2791 ret = -ENOMEM;
2792 goto failure;
2793 }
2794
2795 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
2796 LCLA_ALIGNMENT);
2797 }
2798
2799 writel(virt_to_phys(base->lcla_pool.base),
2800 base->virtbase + D40_DREG_LCLA);
2801failure:
2802 kfree(page_list);
2803 return ret;
2804}
2805
Linus Walleij8d318a52010-03-30 15:33:42 +02002806static int __init d40_probe(struct platform_device *pdev)
2807{
2808 int err;
2809 int ret = -ENOENT;
2810 struct d40_base *base;
2811 struct resource *res = NULL;
2812 int num_reserved_chans;
2813 u32 val;
2814
2815 base = d40_hw_detect_init(pdev);
2816
2817 if (!base)
2818 goto failure;
2819
2820 num_reserved_chans = d40_phy_res_init(base);
2821
2822 platform_set_drvdata(pdev, base);
2823
2824 spin_lock_init(&base->interrupt_lock);
2825 spin_lock_init(&base->execmd_lock);
2826
2827 /* Get IO for logical channel parameter address */
2828 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2829 if (!res) {
2830 ret = -ENOENT;
2831 dev_err(&pdev->dev,
2832 "[%s] No \"lcpa\" memory resource\n",
2833 __func__);
2834 goto failure;
2835 }
2836 base->lcpa_size = resource_size(res);
2837 base->phy_lcpa = res->start;
2838
2839 if (request_mem_region(res->start, resource_size(res),
2840 D40_NAME " I/O lcpa") == NULL) {
2841 ret = -EBUSY;
2842 dev_err(&pdev->dev,
2843 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2844 __func__, res->start, res->end);
2845 goto failure;
2846 }
2847
2848 /* We make use of ESRAM memory for this. */
2849 val = readl(base->virtbase + D40_DREG_LCPA);
2850 if (res->start != val && val != 0) {
2851 dev_warn(&pdev->dev,
2852 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2853 __func__, val, res->start);
2854 } else
2855 writel(res->start, base->virtbase + D40_DREG_LCPA);
2856
2857 base->lcpa_base = ioremap(res->start, resource_size(res));
2858 if (!base->lcpa_base) {
2859 ret = -ENOMEM;
2860 dev_err(&pdev->dev,
2861 "[%s] Failed to ioremap LCPA region\n",
2862 __func__);
2863 goto failure;
2864 }
Linus Walleij508849a2010-06-20 21:26:07 +00002865
2866 ret = d40_lcla_allocate(base);
2867 if (ret) {
2868 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
Linus Walleij8d318a52010-03-30 15:33:42 +02002869 __func__);
2870 goto failure;
2871 }
2872
Linus Walleij8d318a52010-03-30 15:33:42 +02002873 spin_lock_init(&base->lcla_pool.lock);
2874
2875 base->lcla_pool.num_blocks = base->num_phy_chans;
2876
2877 base->irq = platform_get_irq(pdev, 0);
2878
2879 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2880
2881 if (ret) {
2882 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2883 goto failure;
2884 }
2885
2886 err = d40_dmaengine_init(base, num_reserved_chans);
2887 if (err)
2888 goto failure;
2889
2890 d40_hw_init(base);
2891
2892 dev_info(base->dev, "initialized\n");
2893 return 0;
2894
2895failure:
2896 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00002897 if (base->desc_slab)
2898 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02002899 if (base->virtbase)
2900 iounmap(base->virtbase);
Linus Walleij508849a2010-06-20 21:26:07 +00002901 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
2902 free_pages((unsigned long)base->lcla_pool.base,
2903 base->lcla_pool.pages);
Jonas Aaberg767a9672010-08-09 12:08:34 +00002904
2905 kfree(base->lcla_pool.base_unaligned);
2906
Linus Walleij8d318a52010-03-30 15:33:42 +02002907 if (base->phy_lcpa)
2908 release_mem_region(base->phy_lcpa,
2909 base->lcpa_size);
2910 if (base->phy_start)
2911 release_mem_region(base->phy_start,
2912 base->phy_size);
2913 if (base->clk) {
2914 clk_disable(base->clk);
2915 clk_put(base->clk);
2916 }
2917
2918 kfree(base->lcla_pool.alloc_map);
2919 kfree(base->lookup_log_chans);
2920 kfree(base->lookup_phy_chans);
2921 kfree(base->phy_res);
2922 kfree(base);
2923 }
2924
2925 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2926 return ret;
2927}
2928
2929static struct platform_driver d40_driver = {
2930 .driver = {
2931 .owner = THIS_MODULE,
2932 .name = D40_NAME,
2933 },
2934};
2935
2936int __init stedma40_init(void)
2937{
2938 return platform_driver_probe(&d40_driver, d40_probe);
2939}
2940arch_initcall(stedma40_init);