/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
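
/*
 * Descriptor life cycle, as implemented below: a descriptor starts on ld_free
 * as DESC_IDLE, is marked DESC_PREPARED by shdma_add_desc(), moves to
 * ld_queue as DESC_SUBMITTED in shdma_tx_submit(), becomes DESC_COMPLETED in
 * the threaded IRQ handler, DESC_WAITING once its callback has run, and
 * returns to ld_free as DESC_IDLE in __ld_cleanup().
 */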

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .setup_xfer() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with schan->chan_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
			     dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;

		slave_id = schan->slave_id;
	} else {
		match = slave_id;
	}

	if (slave_id < 0 || slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = slave_id;

	return 0;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		ret = shdma_setup_slave(schan, slave->slave_id, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

/*
 * This is the standard shdma filter function to be used as a replacement for
 * the "old" method, using the .private pointer. If for some reason you
 * allocate a channel without slave data, use something like ERR_PTR(-EINVAL)
 * as a filter parameter. If this filter is used, the slave driver, after
 * calling dma_request_channel(), will also have to call
 * dmaengine_slave_config() with .slave_id, .direction, and either .src_addr
 * or .dst_addr set.
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers, using
 * these services, would have to provide their own filters, which first would
 * check the device driver, similar to how other DMAC drivers, e.g.,
 * sa11x0-dma.c, do this, and only then, in case of a match, call this common
 * filter.
 * NOTE 2: This filter function is also used in the DT case by
 * shdma_of_xlate(). In that case the MID-RID value is used for slave channel
 * filtering and is passed to this function in the "arg" parameter.
 */
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int match = (long)arg;
	int ret;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)
		return false;

	if (match < 0)
		/* No slave requested - arbitrary channel */
		return true;

	schan = to_shdma_chan(chan);
	if (!schan->dev->of_node && match >= slave_num)
		return false;

	sdev = to_shdma_dev(schan->dma_chan.device);
	ret = sdev->ops->set_slave(schan, match, 0, true);
	if (ret < 0)
		return false;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);
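
/*
 * Illustrative sketch only (not part of this library): following the rules in
 * the comment above, a slave driver would request and configure a channel
 * roughly like this. "id" and "fifo_addr" are hypothetical, driver-specific
 * values.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.slave_id	= id,
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_addr,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter, (void *)(long)id);
 *	if (chan && dmaengine_slave_config(chan, &cfg) < 0) {
 *		dma_release_channel(chan);
 *		chan = NULL;
 *	}
 */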

/*
 * Process completed descriptors on ld_queue. Returns the callback that was
 * run, if any, so that the caller can loop until no more callbacks are
 * pending.
 */
static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	if (callback)
		callback(param);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns a prepared descriptor on success or NULL on error
 * Locks: called with schan->chan_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}

#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist sgl[SHDMA_MAX_SG_LEN];
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d\n",
			sg_len, SHDMA_MAX_SG_LEN);
		return NULL;
	}

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	/* Split the cyclic buffer into period-sized SG entries */
	sg_init_table(sgl, sg_len);
	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);
}

static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	struct dma_slave_config *config;
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&schan->chan_lock, flags);
		ops->halt_channel(schan);

		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
			/* Record partial transfer */
			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
						struct shdma_desc, node);
			desc->partial = ops->get_partial(schan, desc);
		}

		spin_unlock_irqrestore(&schan->chan_lock, flags);

		shdma_chan_ld_cleanup(schan, true);
		break;
	case DMA_SLAVE_CONFIG:
		/*
		 * So far only .slave_id is used, but the slave drivers are
		 * encouraged to also set a transfer direction and an address.
		 */
		if (!arg)
			return -EINVAL;
		/*
		 * We could lock this, but you shouldn't be configuring the
		 * channel while using it...
		 */
		config = (struct dma_slave_config *)arg;
		ret = shdma_setup_slave(schan, config->slave_id,
					config->direction == DMA_DEV_TO_MEM ?
					config->src_addr : config->dst_addr);
		if (ret < 0)
			return ret;
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error.
	 */
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
			sdesc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);

static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
			   unsigned long flags, const char *name)
{
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return ret;
}
EXPORT_SYMBOL(shdma_request_irq);

void shdma_chan_probe(struct shdma_dev *sdev,
		      struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[sdev->dma_dev.chancnt++] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);

void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now, they can trivially be made optional
	 * later as required
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
	dma_dev->device_control = shdma_control;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);
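
/*
 * Illustrative sketch only (hypothetical glue driver, not part of this
 * library): a hardware driver layered on this base library fills in a
 * struct shdma_ops with the callbacks checked above, then registers itself
 * and its channels roughly like this. All "my_"-prefixed names are
 * assumptions for the sake of the example.
 *
 *	static const struct shdma_ops my_ops = {
 *		.embedded_desc	= my_embedded_desc,
 *		.start_xfer	= my_start_xfer,
 *		.setup_xfer	= my_setup_xfer,
 *		.set_slave	= my_set_slave,
 *		.desc_setup	= my_desc_setup,
 *		.slave_addr	= my_slave_addr,
 *		.channel_busy	= my_channel_busy,
 *		.halt_channel	= my_halt_channel,
 *		.desc_completed	= my_desc_completed,
 *		.chan_irq	= my_chan_irq,
 *	};
 *
 *	sdev->ops = &my_ops;
 *	sdev->desc_size = sizeof(struct my_desc);
 *	ret = shdma_init(&pdev->dev, sdev, MY_CHAN_NUM);
 *	...
 *	shdma_chan_probe(sdev, schan, id);		// per channel
 *	ret = shdma_request_irq(schan, irq, IRQF_SHARED, "my-dma");
 *	...
 *	ret = dma_async_device_register(&sdev->dma_dev);
 */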

void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
	shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
				   sizeof(long), GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	kfree(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");