/*
 * drivers/dma/coh901318_lli.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 * Support functions for handling LLIs (linked list items) for DMA
 * Author: Per Friden <per.friden@stericsson.com>
 */

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/dmapool.h>
#include <linux/memory.h>
#include <mach/coh901318.h>

#include "coh901318_lli.h"

#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
#else
#define DEBUGFS_POOL_COUNTER_RESET(pool)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
#endif

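/*
 * coh901318_lli_next() - return the virtual address of the next item in
 * an lli chain, or NULL if this is the last item (link_addr == 0).
 */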
static struct coh901318_lli *
coh901318_lli_next(struct coh901318_lli *data)
{
	if (data == NULL || data->link_addr == 0)
		return NULL;

	return (struct coh901318_lli *) data->virt_link_addr;
}

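/*
 * coh901318_pool_create() - initialize the pool lock and create the
 * dma_pool backing the lli descriptors; @size and @align are the size
 * and alignment of a single descriptor.
 */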
int coh901318_pool_create(struct coh901318_pool *pool,
			  struct device *dev,
			  size_t size, size_t align)
{
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
	if (pool->dmapool == NULL)
		return -ENOMEM;

	DEBUGFS_POOL_COUNTER_RESET(pool);
	return 0;
}

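/*
 * coh901318_pool_destroy() - release the dma_pool created by
 * coh901318_pool_create().
 */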
int coh901318_pool_destroy(struct coh901318_pool *pool)
{
	dma_pool_destroy(pool->dmapool);
	return 0;
}

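/*
 * coh901318_lli_alloc() - allocate a chain of @len lli items from the
 * pool and link them both physically (link_addr) and virtually
 * (virt_link_addr); the last item terminates the chain with a zero
 * link_addr. Returns the head of the chain or NULL on failure.
 */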
struct coh901318_lli *
coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
{
	int i;
	struct coh901318_lli *head;
	struct coh901318_lli *lli;
	struct coh901318_lli *lli_prev;
	dma_addr_t phy;

	if (len == 0)
		return NULL;

	spin_lock(&pool->lock);

	head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

	if (head == NULL)
		goto err;

	DEBUGFS_POOL_COUNTER_ADD(pool, 1);

	lli = head;
	lli->phy_this = phy;

	for (i = 1; i < len; i++) {
		lli_prev = lli;

		lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

		if (lli == NULL)
			goto err_clean_up;

		DEBUGFS_POOL_COUNTER_ADD(pool, 1);
		lli->phy_this = phy;

		lli_prev->link_addr = phy;
		lli_prev->virt_link_addr = lli;
	}

	lli->link_addr = 0x00000000U;

	spin_unlock(&pool->lock);

	return head;

 err:
	spin_unlock(&pool->lock);
	return NULL;

 err_clean_up:
	lli_prev->link_addr = 0x00000000U;
	spin_unlock(&pool->lock);
	coh901318_lli_free(pool, &head);
	return NULL;
}

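/*
 * coh901318_lli_free() - walk an lli chain, return every item to the
 * pool and clear the caller's pointer; a NULL pointer or an empty
 * chain is a no-op.
 */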
void coh901318_lli_free(struct coh901318_pool *pool,
			struct coh901318_lli **lli)
{
	struct coh901318_lli *l;
	struct coh901318_lli *next;

	if (lli == NULL)
		return;

	l = *lli;

	if (l == NULL)
		return;

	spin_lock(&pool->lock);

	while (l->link_addr) {
		next = l->virt_link_addr;
		dma_pool_free(pool->dmapool, l, l->phy_this);
		DEBUGFS_POOL_COUNTER_ADD(pool, -1);
		l = next;
	}
	dma_pool_free(pool->dmapool, l, l->phy_this);
	DEBUGFS_POOL_COUNTER_ADD(pool, -1);

	spin_unlock(&pool->lock);
	*lli = NULL;
}

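/*
 * coh901318_lli_fill_memcpy() - program an lli chain for a memcpy
 * transfer: every chained item moves MAX_DMA_PACKET_SIZE bytes and the
 * last item carries the remaining bytes together with the terminating
 * control word (ctrl_eom).
 */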
int
coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t source, unsigned int size,
			  dma_addr_t destination, u32 ctrl_chained,
			  u32 ctrl_eom)
{
	int s = size;
	dma_addr_t src = source;
	dma_addr_t dst = destination;

	lli->src_addr = src;
	lli->dst_addr = dst;

	while (lli->link_addr) {
		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
		lli->src_addr = src;
		lli->dst_addr = dst;

		s -= MAX_DMA_PACKET_SIZE;
		lli = coh901318_lli_next(lli);

		src += MAX_DMA_PACKET_SIZE;
		dst += MAX_DMA_PACKET_SIZE;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	return 0;
}

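/*
 * coh901318_lli_fill_single() - program an lli chain for a transfer
 * between a contiguous buffer and a fixed device address; @dir selects
 * which side is source and which is destination.
 */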
int
coh901318_lli_fill_single(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t buf, unsigned int size,
			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
			  enum dma_data_direction dir)
{
	int s = size;
	dma_addr_t src;
	dma_addr_t dst;

	if (dir == DMA_TO_DEVICE) {
		src = buf;
		dst = dev_addr;
	} else if (dir == DMA_FROM_DEVICE) {
		src = dev_addr;
		dst = buf;
	} else {
		return -EINVAL;
	}

	while (lli->link_addr) {
		size_t block_size = MAX_DMA_PACKET_SIZE;

		/*
		 * If we are on the next-to-final block and there will
		 * be less than half a DMA packet left for the last
		 * block, then we want to make this block a little
		 * smaller to balance the sizes. This is meant to
		 * avoid too small transfers if the buffer size is
		 * (MAX_DMA_PACKET_SIZE*N + 1).
		 */
		if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
			block_size = MAX_DMA_PACKET_SIZE/2;

		lli->control = ctrl_chained | block_size;
		lli->src_addr = src;
		lli->dst_addr = dst;

		s -= block_size;
		lli = coh901318_lli_next(lli);

		if (dir == DMA_TO_DEVICE)
			src += block_size;
		else if (dir == DMA_FROM_DEVICE)
			dst += block_size;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	return 0;
}

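/*
 * coh901318_lli_fill_sg() - program an lli chain from a scatterlist.
 * Each sg entry is split into MAX_DMA_PACKET_SIZE pieces; pieces that
 * continue within an entry use ctrl_chained, the last piece of an
 * intermediate entry uses ctrl (falling back to ctrl_last if ctrl is
 * zero) and the last piece of the final entry uses ctrl_last.
 */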
int
coh901318_lli_fill_sg(struct coh901318_pool *pool,
		      struct coh901318_lli *lli,
		      struct scatterlist *sgl, unsigned int nents,
		      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
		      u32 ctrl_last,
		      enum dma_data_direction dir, u32 ctrl_irq_mask)
{
	int i;
	struct scatterlist *sg;
	u32 ctrl_sg;
	dma_addr_t src = 0;
	dma_addr_t dst = 0;
	u32 bytes_to_transfer;
	u32 elem_size;

	if (lli == NULL)
		return -EINVAL;

	spin_lock(&pool->lock);

	if (dir == DMA_TO_DEVICE)
		dst = dev_addr;
	else if (dir == DMA_FROM_DEVICE)
		src = dev_addr;
	else
		goto err;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_chain(sg)) {
			/* sg continues to the next sg-element, so
			 * don't send ctrl_finish until the last
			 * sg-element in the chain
			 */
			ctrl_sg = ctrl_chained;
		} else if (i == nents - 1) {
			ctrl_sg = ctrl_last;
		} else {
			ctrl_sg = ctrl ? ctrl : ctrl_last;
		}

		if (dir == DMA_TO_DEVICE)
			/* source address is taken from this sg element */
			src = sg_dma_address(sg);
		else
			/* destination address is taken from this sg element */
			dst = sg_dma_address(sg);

		bytes_to_transfer = sg_dma_len(sg);

		while (bytes_to_transfer) {
			u32 val;

			if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
				elem_size = MAX_DMA_PACKET_SIZE;
				val = ctrl_chained;
			} else {
				elem_size = bytes_to_transfer;
				val = ctrl_sg;
			}

			lli->control = val | elem_size;
			lli->src_addr = src;
			lli->dst_addr = dst;

			if (dir == DMA_FROM_DEVICE)
				dst += elem_size;
			else
				src += elem_size;

			BUG_ON(lli->link_addr & 3);

			bytes_to_transfer -= elem_size;
			lli = coh901318_lli_next(lli);
		}
	}
	spin_unlock(&pool->lock);

	return 0;
 err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}