/*
 * drivers/dma/coh901318_lli.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 * Support functions for handling LLIs (linked list items) for DMA
 * Author: Per Friden <per.friden@stericsson.com>
 */

#include <linux/spinlock.h>
#include <linux/memory.h>
#include <linux/gfp.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>

#include "coh901318.h"

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
#else
#define DEBUGFS_POOL_COUNTER_RESET(pool)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
#endif

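/*
 * Follow the virtual link to the next LLI in a chain; returns NULL at
 * the end of the chain (zero physical link address) or for a NULL
 * argument.
 */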
static struct coh901318_lli *
coh901318_lli_next(struct coh901318_lli *data)
{
	if (data == NULL || data->link_addr == 0)
		return NULL;

	return (struct coh901318_lli *) data->virt_link_addr;
}

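/**
 * coh901318_pool_create() - Create a dma_pool based allocator for LLIs
 * @pool: pool handle to set up
 * @dev: DMA device that the pool allocates coherent memory for
 * @size: size in bytes of one LLI element
 * @align: required alignment of each LLI element
 *
 * Returns 0 on success or -ENOMEM if the backing dma_pool could not
 * be created.
 */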
int coh901318_pool_create(struct coh901318_pool *pool,
			  struct device *dev,
			  size_t size, size_t align)
{
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
	if (pool->dmapool == NULL)
		return -ENOMEM;

	DEBUGFS_POOL_COUNTER_RESET(pool);
	return 0;
}

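/**
 * coh901318_pool_destroy() - Tear down the LLI pool
 * @pool: pool previously set up with coh901318_pool_create()
 *
 * All LLIs allocated from the pool must have been returned with
 * coh901318_lli_free() before calling this. Returns 0.
 */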
int coh901318_pool_destroy(struct coh901318_pool *pool)
{
	dma_pool_destroy(pool->dmapool);
	return 0;
}

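/**
 * coh901318_lli_alloc() - Allocate a chain of linked LLIs from the pool
 * @pool: pool to allocate from
 * @len: number of LLIs in the chain
 *
 * The elements are linked both physically (link_addr) and virtually
 * (virt_link_addr); the last element terminates the chain with a zero
 * link address. Returns the head of the chain, or NULL if @len is zero
 * or the pool runs out of memory.
 */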
struct coh901318_lli *
coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
{
	int i;
	struct coh901318_lli *head;
	struct coh901318_lli *lli;
	struct coh901318_lli *lli_prev;
	dma_addr_t phy;

	if (len == 0)
		return NULL;

	spin_lock(&pool->lock);

	head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

	if (head == NULL)
		goto err;

	DEBUGFS_POOL_COUNTER_ADD(pool, 1);

	lli = head;
	lli->phy_this = phy;
	lli->link_addr = 0x00000000;
	lli->virt_link_addr = NULL;

	for (i = 1; i < len; i++) {
		lli_prev = lli;

		lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

		if (lli == NULL)
			goto err_clean_up;

		DEBUGFS_POOL_COUNTER_ADD(pool, 1);
		lli->phy_this = phy;
		lli->link_addr = 0x00000000;
		lli->virt_link_addr = NULL;

		lli_prev->link_addr = phy;
		lli_prev->virt_link_addr = lli;
	}

	spin_unlock(&pool->lock);

	return head;

 err:
	spin_unlock(&pool->lock);
	return NULL;

 err_clean_up:
	lli_prev->link_addr = 0x00000000U;
	spin_unlock(&pool->lock);
	coh901318_lli_free(pool, &head);
	return NULL;
}

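/**
 * coh901318_lli_free() - Return a chain of LLIs to the pool
 * @pool: pool the chain was allocated from
 * @lli: address of the chain head pointer; set to NULL on return
 *
 * Walks the chain through the virtual links and frees every element,
 * including the terminating one. A NULL pointer or NULL head is a no-op.
 */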
void coh901318_lli_free(struct coh901318_pool *pool,
			struct coh901318_lli **lli)
{
	struct coh901318_lli *l;
	struct coh901318_lli *next;

	if (lli == NULL)
		return;

	l = *lli;

	if (l == NULL)
		return;

	spin_lock(&pool->lock);

	while (l->link_addr) {
		next = l->virt_link_addr;
		dma_pool_free(pool->dmapool, l, l->phy_this);
		DEBUGFS_POOL_COUNTER_ADD(pool, -1);
		l = next;
	}
	dma_pool_free(pool->dmapool, l, l->phy_this);
	DEBUGFS_POOL_COUNTER_ADD(pool, -1);

	spin_unlock(&pool->lock);
	*lli = NULL;
}

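/**
 * coh901318_lli_fill_memcpy() - Fill an LLI chain for a memcpy transfer
 * @pool: pool the chain was allocated from (not used while filling)
 * @lli: head of a chain long enough to cover @size bytes
 * @source: DMA address to copy from
 * @size: total number of bytes to transfer
 * @destination: DMA address to copy to
 * @ctrl_chained: control word used for all but the last LLI
 * @ctrl_eom: control word used for the last LLI (end of transfer)
 *
 * Every chained LLI carries MAX_DMA_PACKET_SIZE bytes; the remainder
 * is placed in the final LLI together with @ctrl_eom. Returns 0.
 */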
int
coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t source, unsigned int size,
			  dma_addr_t destination, u32 ctrl_chained,
			  u32 ctrl_eom)
{
	int s = size;
	dma_addr_t src = source;
	dma_addr_t dst = destination;

	lli->src_addr = src;
	lli->dst_addr = dst;

	while (lli->link_addr) {
		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
		lli->src_addr = src;
		lli->dst_addr = dst;

		s -= MAX_DMA_PACKET_SIZE;
		lli = coh901318_lli_next(lli);

		src += MAX_DMA_PACKET_SIZE;
		dst += MAX_DMA_PACKET_SIZE;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	return 0;
}

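/**
 * coh901318_lli_fill_single() - Fill an LLI chain for a single-buffer transfer
 * @pool: pool the chain was allocated from (not used while filling)
 * @lli: head of a chain long enough to cover @size bytes
 * @buf: DMA address of the memory buffer
 * @size: total number of bytes to transfer
 * @dev_addr: DMA address of the device
 * @ctrl_chained: control word used for all but the last LLI
 * @ctrl_eom: control word used for the last LLI
 * @dir: DMA_MEM_TO_DEV or DMA_DEV_TO_MEM
 *
 * Blocks are MAX_DMA_PACKET_SIZE bytes, except that the next-to-last
 * block is halved when the tail would otherwise be very small: a buffer
 * of MAX_DMA_PACKET_SIZE + 1 bytes, for instance, is split into two
 * roughly equal halves instead of a full packet plus a one-byte
 * transfer. Returns 0, or -EINVAL for an unsupported direction.
 */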
int
coh901318_lli_fill_single(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t buf, unsigned int size,
			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
			  enum dma_transfer_direction dir)
{
	int s = size;
	dma_addr_t src;
	dma_addr_t dst;

	if (dir == DMA_MEM_TO_DEV) {
		src = buf;
		dst = dev_addr;
	} else if (dir == DMA_DEV_TO_MEM) {
		src = dev_addr;
		dst = buf;
	} else {
		return -EINVAL;
	}

	while (lli->link_addr) {
		size_t block_size = MAX_DMA_PACKET_SIZE;

		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;

		/*
		 * If we are on the next-to-final block and there will
		 * be less than half a DMA packet left for the last
		 * block, then we want to make this block a little
		 * smaller to balance the sizes. This is meant to
		 * avoid too small transfers if the buffer size is
		 * (MAX_DMA_PACKET_SIZE * N + 1).
		 */
		if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
			block_size = MAX_DMA_PACKET_SIZE/2;

		s -= block_size;
		lli->src_addr = src;
		lli->dst_addr = dst;

		lli = coh901318_lli_next(lli);

		if (dir == DMA_MEM_TO_DEV)
			src += block_size;
		else if (dir == DMA_DEV_TO_MEM)
			dst += block_size;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	return 0;
}

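/**
 * coh901318_lli_fill_sg() - Fill an LLI chain from a scatterlist
 * @pool: pool the chain was allocated from; its lock is held while filling
 * @lli: head of a chain long enough to cover the whole scatterlist
 * @sgl: scatterlist describing the memory side of the transfer
 * @nents: number of entries in @sgl
 * @dev_addr: DMA address of the device
 * @ctrl_chained: control word for chained (non-final) LLIs
 * @ctrl: control word for the last LLI of each intermediate sg entry,
 *	falling back to @ctrl_last if zero
 * @ctrl_last: control word for the very last LLI of the transfer
 * @dir: DMA_MEM_TO_DEV or DMA_DEV_TO_MEM
 * @ctrl_irq_mask: currently unused
 *
 * Each sg entry is split into MAX_DMA_PACKET_SIZE sized pieces plus a
 * remainder. Returns 0 on success, -EINVAL on a NULL chain or an
 * unsupported direction.
 */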
int
coh901318_lli_fill_sg(struct coh901318_pool *pool,
		      struct coh901318_lli *lli,
		      struct scatterlist *sgl, unsigned int nents,
		      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
		      u32 ctrl_last,
		      enum dma_transfer_direction dir, u32 ctrl_irq_mask)
{
	int i;
	struct scatterlist *sg;
	u32 ctrl_sg;
	dma_addr_t src = 0;
	dma_addr_t dst = 0;
	u32 bytes_to_transfer;
	u32 elem_size;

	if (lli == NULL)
		return -EINVAL;

	spin_lock(&pool->lock);

	if (dir == DMA_MEM_TO_DEV)
		dst = dev_addr;
	else if (dir == DMA_DEV_TO_MEM)
		src = dev_addr;
	else
		goto err;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_chain(sg)) {
			/* sg continues to the next sg-element, don't
			 * send ctrl_finish until the last
			 * sg-element in the chain
			 */
			ctrl_sg = ctrl_chained;
		} else if (i == nents - 1)
			ctrl_sg = ctrl_last;
		else
			ctrl_sg = ctrl ? ctrl : ctrl_last;

		if (dir == DMA_MEM_TO_DEV)
			/* source address comes from the sg entry */
			src = sg_dma_address(sg);
		else
			/* destination address comes from the sg entry */
			dst = sg_dma_address(sg);

		bytes_to_transfer = sg_dma_len(sg);

		while (bytes_to_transfer) {
			u32 val;

			if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
				elem_size = MAX_DMA_PACKET_SIZE;
				val = ctrl_chained;
			} else {
				elem_size = bytes_to_transfer;
				val = ctrl_sg;
			}

			lli->control = val | elem_size;
			lli->src_addr = src;
			lli->dst_addr = dst;

			if (dir == DMA_DEV_TO_MEM)
				dst += elem_size;
			else
				src += elem_size;

			BUG_ON(lli->link_addr & 3);

			bytes_to_transfer -= elem_size;
			lli = coh901318_lli_next(lli);
		}

	}
	spin_unlock(&pool->lock);

	return 0;
 err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}