Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Virtual DMA channel support for DMAengine |
| 3 | * |
| 4 | * Copyright (C) 2012 Russell King |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. |
| 9 | */ |
| 10 | #include <linux/device.h> |
| 11 | #include <linux/dmaengine.h> |
| 12 | #include <linux/module.h> |
| 13 | #include <linux/spinlock.h> |
| 14 | |
| 15 | #include "virt-dma.h" |
| 16 | |
/* Map a dmaengine tx descriptor back to its containing virt_dma_desc. */
static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	/* tx is embedded as the 'tx' member of struct virt_dma_desc */
	return container_of(tx, struct virt_dma_desc, tx);
}
| 21 | |
| 22 | dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) |
| 23 | { |
| 24 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); |
| 25 | struct virt_dma_desc *vd = to_virt_desc(tx); |
| 26 | unsigned long flags; |
| 27 | dma_cookie_t cookie; |
| 28 | |
| 29 | spin_lock_irqsave(&vc->lock, flags); |
| 30 | cookie = dma_cookie_assign(tx); |
| 31 | |
Robert Jarzmik | 13bb26a | 2015-10-13 21:54:28 +0200 | [diff] [blame] | 32 | list_move_tail(&vd->node, &vc->desc_submitted); |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 33 | spin_unlock_irqrestore(&vc->lock, flags); |
| 34 | |
| 35 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", |
| 36 | vc, vd, cookie); |
| 37 | |
| 38 | return cookie; |
| 39 | } |
| 40 | EXPORT_SYMBOL_GPL(vchan_tx_submit); |
| 41 | |
Robert Jarzmik | 13bb26a | 2015-10-13 21:54:28 +0200 | [diff] [blame] | 42 | /** |
| 43 | * vchan_tx_desc_free - free a reusable descriptor |
| 44 | * @tx: the transfer |
| 45 | * |
| 46 | * This function frees a previously allocated reusable descriptor. The only |
| 47 | * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the |
| 48 | * transfer. |
| 49 | * |
| 50 | * Returns 0 upon success |
| 51 | */ |
| 52 | int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx) |
| 53 | { |
| 54 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); |
| 55 | struct virt_dma_desc *vd = to_virt_desc(tx); |
| 56 | unsigned long flags; |
| 57 | |
| 58 | spin_lock_irqsave(&vc->lock, flags); |
| 59 | list_del(&vd->node); |
| 60 | spin_unlock_irqrestore(&vc->lock, flags); |
| 61 | |
| 62 | dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n", |
| 63 | vc, vd, vd->tx.cookie); |
| 64 | vc->desc_free(vd); |
| 65 | return 0; |
| 66 | } |
| 67 | EXPORT_SYMBOL_GPL(vchan_tx_desc_free); |
| 68 | |
Russell King | fe04587 | 2012-05-10 23:39:27 +0100 | [diff] [blame] | 69 | struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, |
| 70 | dma_cookie_t cookie) |
| 71 | { |
| 72 | struct virt_dma_desc *vd; |
| 73 | |
| 74 | list_for_each_entry(vd, &vc->desc_issued, node) |
| 75 | if (vd->tx.cookie == cookie) |
| 76 | return vd; |
| 77 | |
| 78 | return NULL; |
| 79 | } |
| 80 | EXPORT_SYMBOL_GPL(vchan_find_desc); |
| 81 | |
/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	/*
	 * Detach the whole completed list and the cyclic descriptor's
	 * callback while holding the lock; callbacks themselves must run
	 * unlocked since they may resubmit work on this channel.
	 */
	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		/* Empty cb makes the invoke below a no-op */
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	/* Cyclic period callback, if one was pending */
	dmaengine_desc_callback_invoke(&cb, NULL);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		/*
		 * Snapshot the callback first: the descriptor may be freed
		 * (non-reusable case) before the callback is invoked.
		 */
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		/* Reusable descriptors are parked instead of destroyed */
		if (dmaengine_desc_test_reuse(&vd->tx))
			list_add(&vd->node, &vc->desc_allocated);
		else
			vc->desc_free(vd);

		dmaengine_desc_callback_invoke(&cb, NULL);
	}
}
| 118 | |
| 119 | void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) |
| 120 | { |
Andy Shevchenko | fdb980f | 2017-04-25 22:36:38 +0300 | [diff] [blame] | 121 | struct virt_dma_desc *vd, *_vd; |
| 122 | |
| 123 | list_for_each_entry_safe(vd, _vd, head, node) { |
Robert Jarzmik | 13bb26a | 2015-10-13 21:54:28 +0200 | [diff] [blame] | 124 | if (dmaengine_desc_test_reuse(&vd->tx)) { |
| 125 | list_move_tail(&vd->node, &vc->desc_allocated); |
| 126 | } else { |
| 127 | dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); |
| 128 | list_del(&vd->node); |
| 129 | vc->desc_free(vd); |
| 130 | } |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 131 | } |
| 132 | } |
| 133 | EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); |
| 134 | |
| 135 | void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) |
| 136 | { |
| 137 | dma_cookie_init(&vc->chan); |
| 138 | |
| 139 | spin_lock_init(&vc->lock); |
Robert Jarzmik | 13bb26a | 2015-10-13 21:54:28 +0200 | [diff] [blame] | 140 | INIT_LIST_HEAD(&vc->desc_allocated); |
Russell King | 50437bf | 2012-04-13 12:07:23 +0100 | [diff] [blame] | 141 | INIT_LIST_HEAD(&vc->desc_submitted); |
| 142 | INIT_LIST_HEAD(&vc->desc_issued); |
| 143 | INIT_LIST_HEAD(&vc->desc_completed); |
| 144 | |
| 145 | tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); |
| 146 | |
| 147 | vc->chan.device = dmadev; |
| 148 | list_add_tail(&vc->chan.device_node, &dmadev->channels); |
| 149 | } |
| 150 | EXPORT_SYMBOL_GPL(vchan_init); |
| 151 | |
| 152 | MODULE_AUTHOR("Russell King"); |
| 153 | MODULE_LICENSE("GPL"); |