/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/semaphore.h>
#include <linux/completion.h>

#include <mach/dma.h>
#include <mach/dma_test.h>

/**********************************************************************
 * User-space testing of the DMA driver.
 * Intended to be loaded as a module.  A set of static buffers is
 * kept that user space refers to by number.  The data mover itself
 * is exercised memory-to-memory; device DMA is best tested with the
 * specific device driver in question.
 */
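/* A minimal sketch of how user space might drive this module,
 * assuming the ioctl numbers and request structs come from
 * <mach/dma_test.h> and that the misc device registered below shows
 * up as /dev/msmdma (names and field types per that header; a, b and
 * buf are placeholders):
 *
 *	int fd = open("/dev/msmdma", O_RDWR);
 *
 *	struct msm_dma_alloc_req req = { .size = 4096 };
 *	ioctl(fd, MSM_DMA_IOALLOC, &req);    // fills in req.bufnum
 *
 *	struct msm_dma_bufxfer xfer = { .bufnum = req.bufnum,
 *					.size = 4096, .data = buf };
 *	ioctl(fd, MSM_DMA_IOWBUF, &xfer);    // user data -> buffer
 *	ioctl(fd, MSM_DMA_IORBUF, &xfer);    // buffer -> user data
 *
 *	struct msm_dma_scopy sc = { .srcbuf = a, .destbuf = b,
 *				    .size = 4096 };
 *	ioctl(fd, MSM_DMA_IOSCOPY, &sc);     // DMA copy a -> b
 *
 *	ioctl(fd, MSM_DMA_IOFREEALL, 0);     // release all buffers
 */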
#define MAX_TEST_BUFFERS 40
#define MAX_TEST_BUFFER_SIZE 65536
static void *buffers[MAX_TEST_BUFFERS];
static int sizes[MAX_TEST_BUFFERS];

/* Anything that allocates or deallocates buffers must hold this
 * semaphore. */
static DEFINE_SEMAPHORE(buffer_lock);

/* Each buffer has a semaphore associated with it that is held for
 * the duration of any operation on that buffer.  It must also be
 * held to free the given buffer. */
static struct semaphore buffer_sems[MAX_TEST_BUFFERS];

#define buffer_up(num) up(&buffer_sems[num])
#define buffer_down(num) down(&buffer_sems[num])

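/* Lock ordering (see MSM_DMA_IOFREEALL below for both together):
 * buffer_lock guards the allocation table (buffers[]/sizes[]); a
 * buffer's semaphore guards its contents.  Where both are needed,
 * buffer_lock is taken first. */
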
/* Use the General Purpose DMA channel as our test channel.  This
 * channel should be available on any target. */
#define TEST_CHANNEL DMOV_GP_CHAN

struct private {
	/* Each open instance is allowed a single pending
	 * operation. */
	struct semaphore sem;

	/* Simple command buffer.  Allocated and freed by the driver. */
	/* TODO: Allocate these together. */
	dmov_s *command_ptr;

	/* Indirect: one-entry command pointer list referring to
	 * command_ptr. */
	u32 *command_ptr_ptr;

	/* Indicates completion of a pending request. */
	struct completion complete;
};

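/* A sketch of how the two allocations above are chained when a copy
 * runs (the data mover's command-pointer-list convention, as used in
 * dma_scopy() below): command_ptr holds one single-mode
 * (CMD_MODE_SINGLE) descriptor with src, dst and len; command_ptr_ptr
 * holds one word, the physical address of that descriptor, tagged
 * with CMD_PTR_LP to mark the end of the list.  The channel is then
 * started with DMOV_CMD_PTR_LIST plus the physical address of the
 * list. */
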
static void free_buffers(void)
{
	int i;

	for (i = 0; i < MAX_TEST_BUFFERS; i++) {
		if (sizes[i] > 0) {
			kfree(buffers[i]);
			sizes[i] = 0;
		}
	}
}

/* Allocate a buffer of a requested size. */
static int buffer_req(struct msm_dma_alloc_req *req)
{
	int i;

	if (req->size <= 0 || req->size > MAX_TEST_BUFFER_SIZE)
		return -EINVAL;

	down(&buffer_lock);

	/* Find a free buffer. */
	for (i = 0; i < MAX_TEST_BUFFERS; i++)
		if (sizes[i] == 0)
			break;

	if (i >= MAX_TEST_BUFFERS)
		goto error;

	buffers[i] = kmalloc(req->size, GFP_KERNEL | __GFP_DMA);
	if (buffers[i] == NULL)
		goto error;
	sizes[i] = req->size;

	req->bufnum = i;

	up(&buffer_lock);
	return 0;

error:
	up(&buffer_lock);
	return -ENOSPC;
}

/* Copy between two buffers, using the DMA. */
static int dma_scopy(struct msm_dma_scopy *scopy, struct private *priv)
{
	int err = 0;
	dma_addr_t mapped_cmd;
	dma_addr_t mapped_cmd_ptr;

	/* Take the per-buffer semaphores in index order so that two
	 * concurrent copies with src and dest swapped cannot
	 * deadlock. */
	buffer_down(min(scopy->srcbuf, scopy->destbuf));
	if (scopy->srcbuf != scopy->destbuf)
		buffer_down(max(scopy->srcbuf, scopy->destbuf));

	/* Build the single-mode command descriptor and map the source
	 * and destination buffers for DMA. */
	priv->command_ptr->cmd = CMD_PTR_LP | CMD_MODE_SINGLE;
	priv->command_ptr->src = dma_map_single(NULL, buffers[scopy->srcbuf],
						scopy->size, DMA_TO_DEVICE);
	priv->command_ptr->dst = dma_map_single(NULL, buffers[scopy->destbuf],
						scopy->size, DMA_FROM_DEVICE);
	priv->command_ptr->len = scopy->size;

	/* Map the descriptor itself and point the one-entry command
	 * pointer list at its physical address. */
	mapped_cmd =
	    dma_map_single(NULL, priv->command_ptr, sizeof(*priv->command_ptr),
			   DMA_TO_DEVICE);
	*(priv->command_ptr_ptr) = CMD_PTR_ADDR(mapped_cmd) | CMD_PTR_LP;

	mapped_cmd_ptr = dma_map_single(NULL, priv->command_ptr_ptr,
					sizeof(*priv->command_ptr_ptr),
					DMA_TO_DEVICE);

	/* Execute the command list; this call blocks until the data
	 * mover has finished. */
	msm_dmov_exec_cmd(TEST_CHANNEL,
			  DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(mapped_cmd_ptr));

	dma_unmap_single(NULL, mapped_cmd_ptr,
			 sizeof(*priv->command_ptr_ptr), DMA_TO_DEVICE);
	dma_unmap_single(NULL, mapped_cmd,
			 sizeof(*priv->command_ptr), DMA_TO_DEVICE);
	dma_unmap_single(NULL, (dma_addr_t) priv->command_ptr->dst,
			 scopy->size, DMA_FROM_DEVICE);
	dma_unmap_single(NULL, (dma_addr_t) priv->command_ptr->src,
			 scopy->size, DMA_TO_DEVICE);

	if (scopy->srcbuf != scopy->destbuf)
		buffer_up(max(scopy->srcbuf, scopy->destbuf));
	buffer_up(min(scopy->srcbuf, scopy->destbuf));

	return err;
}

static int dma_test_open(struct inode *inode, struct file *file)
{
	struct private *priv;

	printk(KERN_ALERT "%s\n", __func__);

	priv = kmalloc(sizeof(struct private), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;
	file->private_data = priv;

	sema_init(&priv->sem, 1);

	/* Note that these should be allocated together so we don't
	 * waste 32 bytes for each. */

	/* Allocate the command buffer. */
	priv->command_ptr = kmalloc(sizeof(*priv->command_ptr),
				    GFP_KERNEL | __GFP_DMA);
	if (priv->command_ptr == NULL) {
		kfree(priv);
		return -ENOSPC;
	}

	/* And the indirect pointer. */
	priv->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
	if (priv->command_ptr_ptr == NULL) {
		kfree(priv->command_ptr);
		kfree(priv);
		return -ENOSPC;
	}

	return 0;
}

static int dma_test_release(struct inode *inode, struct file *file)
{
	struct private *priv;

	printk(KERN_ALERT "%s\n", __func__);

	if (file->private_data != NULL) {
		priv = file->private_data;
		kfree(priv->command_ptr_ptr);
		kfree(priv->command_ptr);
	}
	kfree(file->private_data);
	file->private_data = NULL;

	return 0;
}

static long dma_test_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int err = 0;
	int tmp;
	struct msm_dma_alloc_req alloc_req;
	struct msm_dma_bufxfer xfer;
	struct msm_dma_scopy scopy;
	struct private *priv = file->private_data;

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != MSM_DMA_IOC_MAGIC)
		return -ENOTTY;

	switch (cmd) {
	case MSM_DMA_IOALLOC:
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
			       sizeof(alloc_req)))
			return -EFAULT;
		if (__copy_from_user(&alloc_req, (void __user *)arg,
				     sizeof(alloc_req)))
			return -EFAULT;
		err = buffer_req(&alloc_req);
		if (err < 0)
			return err;
		if (__copy_to_user((void __user *)arg, &alloc_req,
				   sizeof(alloc_req)))
			return -EFAULT;
		break;

	case MSM_DMA_IOFREEALL:
		down(&buffer_lock);
		for (tmp = 0; tmp < MAX_TEST_BUFFERS; tmp++) {
			buffer_down(tmp);
			if (sizes[tmp] > 0) {
				kfree(buffers[tmp]);
				sizes[tmp] = 0;
			}
			buffer_up(tmp);
		}
		up(&buffer_lock);
		break;

	case MSM_DMA_IOWBUF:
		if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer)))
			return -EFAULT;
		if (xfer.bufnum < 0 || xfer.bufnum >= MAX_TEST_BUFFERS)
			return -EINVAL;
		buffer_down(xfer.bufnum);
		if (sizes[xfer.bufnum] == 0 ||
		    xfer.size <= 0 || xfer.size > sizes[xfer.bufnum]) {
			buffer_up(xfer.bufnum);
			return -EINVAL;
		}
		if (copy_from_user(buffers[xfer.bufnum],
				   (void __user *)xfer.data, xfer.size))
			err = -EFAULT;
		buffer_up(xfer.bufnum);
		break;

	case MSM_DMA_IORBUF:
		if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer)))
			return -EFAULT;
		if (xfer.bufnum < 0 || xfer.bufnum >= MAX_TEST_BUFFERS)
			return -EINVAL;
		buffer_down(xfer.bufnum);
		if (sizes[xfer.bufnum] == 0 ||
		    xfer.size <= 0 || xfer.size > sizes[xfer.bufnum]) {
			buffer_up(xfer.bufnum);
			return -EINVAL;
		}
		if (copy_to_user((void __user *)xfer.data, buffers[xfer.bufnum],
				 xfer.size))
			err = -EFAULT;
		buffer_up(xfer.bufnum);
		break;

	case MSM_DMA_IOSCOPY:
		if (copy_from_user(&scopy, (void __user *)arg, sizeof(scopy)))
			return -EFAULT;
		if (scopy.srcbuf < 0 || scopy.srcbuf >= MAX_TEST_BUFFERS ||
		    sizes[scopy.srcbuf] == 0 ||
		    scopy.destbuf < 0 || scopy.destbuf >= MAX_TEST_BUFFERS ||
		    sizes[scopy.destbuf] == 0 ||
		    scopy.size <= 0 ||
		    scopy.size > sizes[scopy.destbuf] ||
		    scopy.size > sizes[scopy.srcbuf])
			return -EINVAL;
#if 0
		/* Test the interface using memcpy. */
		memcpy(buffers[scopy.destbuf],
		       buffers[scopy.srcbuf], scopy.size);
#else
		err = dma_scopy(&scopy, priv);
#endif
		break;

	default:
		return -ENOTTY;
	}

	return err;
}

/**********************************************************************
 * Register ourselves as a misc device to be able to test the DMA code
 * from user space. */

static const struct file_operations dma_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = dma_test_ioctl,
	.open = dma_test_open,
	.release = dma_test_release,
};

static struct miscdevice dma_test_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "msmdma",
	.fops = &dma_test_fops,
};

static int dma_test_init(void)
{
	int ret, i;

	ret = misc_register(&dma_test_dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < MAX_TEST_BUFFERS; i++)
		sema_init(&buffer_sems[i], 1);

	printk(KERN_ALERT "%s, minor number %d\n", __func__,
	       dma_test_dev.minor);
	return 0;
}

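/* With MISC_DYNAMIC_MINOR the minor number is only known at load time
 * (it is printed above and listed in /proc/misc).  On systems running
 * udev a /dev/msmdma node appears automatically; otherwise one can be
 * created by hand, e.g.:
 *
 *	mknod /dev/msmdma c 10 <minor>
 *
 * where 10 is the misc-device major and <minor> comes from the
 * printk. */
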
static void dma_test_exit(void)
{
	/* Deregister first so no new opens can arrive, then release
	 * any remaining buffers. */
	misc_deregister(&dma_test_dev);
	free_buffers();
	printk(KERN_ALERT "%s\n", __func__);
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("David Brown, Qualcomm, Incorporated");
MODULE_DESCRIPTION("Test for MSM DMA driver");
MODULE_VERSION("1.01");

module_init(dma_test_init);
module_exit(dma_test_exit);