/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

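/*
 * Example usage (the channel name and parameter values below are purely
 * illustrative):
 *
 *	modprobe dmatest channel=dma0chan0 threads_per_chan=2 \
 *		iterations=100 timeout=2000
 *
 * Parameters that are left unset fall back to the defaults described
 * above.
 */
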
/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
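
/*
 * For example, a source byte at index 3 that falls inside the region to
 * be copied is initialized to PATTERN_SRC | PATTERN_COPY
 * | (~3 & PATTERN_COUNT_MASK) = 0x80 | 0x40 | 0x1c = 0xdc, while a
 * destination byte at the same index inside the region to be overwritten
 * starts out as PATTERN_DST | PATTERN_OVERWRITE | 0x1c = 0x3c.
 */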

struct dmatest_thread {
	struct list_head	node;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**dsts;
	enum dma_transaction_type type;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

/*
 * These are protected by dma_list_mutex since they're only used by
 * the DMA filter function callback
 */
static LIST_HEAD(dmatest_channels);
static unsigned int nr_channels;

static bool dmatest_match_channel(struct dma_chan *chan)
{
	if (test_channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), test_channel) == 0;
}

static bool dmatest_match_device(struct dma_device *device)
{
	if (test_device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), test_device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	get_random_bytes(&buf, sizeof(buf));
	return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < test_buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < test_buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8		diff = actual ^ pattern;
	u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char	*thread_name = current->comm;

	if (is_srcbuf)
		pr_warning("%s: srcbuf[0x%x] overwritten!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warning("%s: dstbuf[0x%x] not copied!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warning("%s: dstbuf[0x%x] was copied!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else
		pr_warning("%s: dstbuf[0x%x] mismatch!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < 32)
					dmatest_mismatch(actual, pattern, i,
							counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > 32)
		pr_warning("%s: %u errors suppressed\n",
			current->comm, error_count - 32);

	return error_count;
}

static void dmatest_callback(void *completion)
{
	complete(completion);
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	struct dmatest_thread	*thread = data;
	struct dma_chan		*chan;
	const char		*thread_name;
	unsigned int		src_off, dst_off, len;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			pq_coefs[pq_sources + 1];
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;

	thread_name = current->comm;
	set_freezable_with_signal();

	ret = -ENOMEM;

	smp_rmb();
	chan = thread->chan;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
		dst_cnt = 2;
		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_srcs;

	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src buffers are freed by the DMAEngine code with dma_unmap_single()
	 * dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
	      | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;

	while (!kthread_should_stop()
	       && !(iterations && total_tests >= iterations)) {
		struct dma_device *dev = chan->device;
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		struct completion cmp;
		unsigned long start, tmo, end = 0 /* compiler... */;
		bool reload = true;
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > test_buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       test_buf_size, 1 << align);
			break;
		}

		len = dmatest_random() % test_buf_size + 1;
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
		src_off = dmatest_random() % (test_buf_size - len + 1);
		dst_off = dmatest_random() % (test_buf_size - len + 1);

		src_off = (src_off >> align) << align;
		dst_off = (dst_off >> align) << align;
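		/*
		 * len, src_off and dst_off are now rounded down to the
		 * device's alignment: e.g. with align == 2 (4-byte units),
		 * a random len of 1023 becomes 1020 and an offset of 13
		 * becomes 12.
		 */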

		dmatest_init_srcs(thread->srcs, src_off, len);
		dmatest_init_dsts(thread->dsts, dst_off, len);

		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     test_buf_size,
						     DMA_BIDIRECTIONAL);
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			for (i = 0; i < src_cnt; i++)
				dma_unmap_single(dev->dev, dma_srcs[i], len,
						 DMA_TO_DEVICE);
			for (i = 0; i < dst_cnt; i++)
				dma_unmap_single(dev->dev, dma_dsts[i],
						 test_buf_size,
						 DMA_BIDIRECTIONAL);
			pr_warning("%s: #%u: prep error with src_off=0x%x "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1,
					src_off, dst_off, len);
			msleep(100);
			failed_tests++;
			continue;
		}

		init_completion(&cmp);
		tx->callback = dmatest_callback;
		tx->callback_param = &cmp;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			pr_warning("%s: #%u: submit error %d with src_off=0x%x "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1, cookie,
					src_off, dst_off, len);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

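		/*
		 * Wait for the completion callback.  If the wait is
		 * interrupted by a signal, the loop retries; the full
		 * timeout is re-armed only when the thread was actually
		 * frozen in between (see try_to_freeze() below).
		 */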
		do {
			start = jiffies;
			if (reload)
				end = start + msecs_to_jiffies(timeout);
			else if (end <= start)
				end = start + 1;
			tmo = wait_for_completion_interruptible_timeout(&cmp,
								end - start);
			reload = try_to_freeze();
		} while (tmo == -ERESTARTSYS);

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (tmo == 0) {
			pr_warning("%s: #%u: test timed out\n",
				   thread_name, total_tests - 1);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			pr_warning("%s: #%u: got completion callback,"
				   " but status is \'%s\'\n",
				   thread_name, total_tests - 1,
				   status == DMA_ERROR ? "error" : "in progress");
			failed_tests++;
			continue;
		}

		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
		for (i = 0; i < dst_cnt; i++)
			dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
					 DMA_BIDIRECTIONAL);

		error_count = 0;

		pr_debug("%s: verifying source buffer...\n", thread_name);
		error_count += dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				test_buf_size, src_off + len,
				PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n",
				thread->task->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				test_buf_size, dst_off + len,
				PATTERN_DST, false);

		if (error_count) {
			pr_warning("%s: #%u: %u errors with "
				"src_off=0x%x dst_off=0x%x len=0x%x\n",
				thread_name, total_tests - 1, error_count,
				src_off, dst_off, len);
			failed_tests++;
		} else {
			pr_debug("%s: #%u: No errors with "
				"src_off=0x%x dst_off=0x%x len=0x%x\n",
				thread_name, total_tests - 1,
				src_off, dst_off, len);
		}
	}

	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	kfree(thread->srcs);
err_srcs:
	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
			thread_name, total_tests, failed_tests, ret);

	/* terminate all transfers on specified channels */
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	if (iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("dmatest: thread %s exited with status %d\n",
				thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warning("dmatest: No memory for %s-%s%u\n",
				   dma_chan_name(chan), op, i);

			break;
		}
		thread->chan = dtc->chan;
		thread->type = type;
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
					dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}

static int dmatest_add_channel(struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("dmatest: Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &dmatest_channels);
	nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
		return false;
	else
		return true;
}

static int __init dmatest_init(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	for (;;) {
		chan = dma_request_channel(mask, filter, NULL);
		if (chan) {
			err = dmatest_add_channel(chan);
			if (err) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (max_channels && nr_channels >= max_channels)
			break; /* we have all we need */
	}

	return err;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dmatest: dropped channel %s\n",
			 dma_chan_name(chan));
		dma_release_channel(chan);
	}
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");