/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
                "Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
                "Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
                "Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
                "Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
                "Number of p+q source buffers (default: 3)");
static int timeout = 3000;
module_param(timeout, int, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
                 "Pass -1 for infinite timeout");

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set; all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
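
/*
 * Worked example of the scheme above (illustrative only; assume
 * src_off = dst_off = 0 so that absolute index i = 3 falls inside the
 * copy/overwrite regions):
 *
 *   srcbuf[3] = PATTERN_SRC | PATTERN_COPY | (~3 & PATTERN_COUNT_MASK)
 *             = 0x80 | 0x40 | 0x1c = 0xdc
 *   dstbuf[3] = PATTERN_DST | PATTERN_OVERWRITE | (~3 & PATTERN_COUNT_MASK)
 *             = 0x00 | 0x20 | 0x1c = 0x3c
 *
 * After a correct transfer dstbuf[3] must read 0xdc; which bits differ
 * from the expected value determines the failure class reported by
 * dmatest_mismatch() below.
 */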

struct dmatest_thread {
        struct list_head	node;
        struct task_struct	*task;
        struct dma_chan		*chan;
        u8			**srcs;
        u8			**dsts;
        enum dma_transaction_type type;
};

struct dmatest_chan {
        struct list_head	node;
        struct dma_chan		*chan;
        struct list_head	threads;
};

/*
 * These are protected by dma_list_mutex since they're only used by
 * the DMA filter function callback
 */
static LIST_HEAD(dmatest_channels);
static unsigned int nr_channels;

static bool dmatest_match_channel(struct dma_chan *chan)
{
        if (test_channel[0] == '\0')
                return true;
        return strcmp(dma_chan_name(chan), test_channel) == 0;
}

static bool dmatest_match_device(struct dma_device *device)
{
        if (test_device[0] == '\0')
                return true;
        return strcmp(dev_name(device->dev), test_device) == 0;
}

static unsigned long dmatest_random(void)
{
        unsigned long buf;

        get_random_bytes(&buf, sizeof(buf));
        return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
{
        unsigned int i;
        u8 *buf;

        for (; (buf = *bufs); bufs++) {
                for (i = 0; i < start; i++)
                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
                for ( ; i < start + len; i++)
                        buf[i] = PATTERN_SRC | PATTERN_COPY
                                | (~i & PATTERN_COUNT_MASK);
                for ( ; i < test_buf_size; i++)
                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
        }
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
{
        unsigned int i;
        u8 *buf;

        for (; (buf = *bufs); bufs++) {
                for (i = 0; i < start; i++)
                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
                for ( ; i < start + len; i++)
                        buf[i] = PATTERN_DST | PATTERN_OVERWRITE
                                | (~i & PATTERN_COUNT_MASK);
                for ( ; i < test_buf_size; i++)
                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
        }
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
                unsigned int counter, bool is_srcbuf)
{
        u8		diff = actual ^ pattern;
        u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
        const char	*thread_name = current->comm;

        if (is_srcbuf)
                pr_warning("%s: srcbuf[0x%x] overwritten!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
        else if ((pattern & PATTERN_COPY)
                        && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
                pr_warning("%s: dstbuf[0x%x] not copied!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
        else if (diff & PATTERN_SRC)
                pr_warning("%s: dstbuf[0x%x] was copied!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
        else
                pr_warning("%s: dstbuf[0x%x] mismatch!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
                unsigned int end, unsigned int counter, u8 pattern,
                bool is_srcbuf)
{
        unsigned int i;
        unsigned int error_count = 0;
        u8 actual;
        u8 expected;
        u8 *buf;
        unsigned int counter_orig = counter;

        for (; (buf = *bufs); bufs++) {
                counter = counter_orig;
                for (i = start; i < end; i++) {
                        actual = buf[i];
                        expected = pattern | (~counter & PATTERN_COUNT_MASK);
                        if (actual != expected) {
                                if (error_count < 32)
                                        dmatest_mismatch(actual, pattern, i,
                                                        counter, is_srcbuf);
                                error_count++;
                        }
                        counter++;
                }
        }

        if (error_count > 32)
                pr_warning("%s: %u errors suppressed\n",
                        current->comm, error_count - 32);

        return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
        bool			done;
        wait_queue_head_t	*wait;
};

static void dmatest_callback(void *arg)
{
        struct dmatest_done *done = arg;

        done->done = true;
        wake_up_all(done->wait);
}

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
                             unsigned int count)
{
        while (count--)
                dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
                             unsigned int count)
{
        while (count--)
                dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
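
/*
 * Illustrative sketch of one iteration's buffer layout (offsets and
 * length are randomized per test):
 *
 *   src: | PATTERN_SRC | PATTERN_SRC|PATTERN_COPY      | PATTERN_SRC |
 *        0          src_off                 src_off+len    test_buf_size
 *   dst: | PATTERN_DST | PATTERN_DST|PATTERN_OVERWRITE | PATTERN_DST |
 *        0          dst_off                 dst_off+len    test_buf_size
 *
 * After the transfer, dst[dst_off..dst_off+len) must carry the bytes
 * of the source copy region and every other byte must be untouched;
 * that is exactly what the dmatest_verify() calls below check.
 */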
static int dmatest_func(void *data)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
        struct dmatest_thread	*thread = data;
        struct dmatest_done	done = { .wait = &done_wait };
        struct dma_chan		*chan;
        const char		*thread_name;
        unsigned int		src_off, dst_off, len;
        unsigned int		error_count;
        unsigned int		failed_tests = 0;
        unsigned int		total_tests = 0;
        dma_cookie_t		cookie;
        enum dma_status		status;
        enum dma_ctrl_flags	flags;
        u8			pq_coefs[pq_sources + 1];
        int			ret;
        int			src_cnt;
        int			dst_cnt;
        int			i;

        thread_name = current->comm;
        set_freezable();

        ret = -ENOMEM;

        smp_rmb();
        chan = thread->chan;
        if (thread->type == DMA_MEMCPY)
                src_cnt = dst_cnt = 1;
        else if (thread->type == DMA_XOR) {
                src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
                dst_cnt = 1;
        } else if (thread->type == DMA_PQ) {
                src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
                dst_cnt = 2;
                for (i = 0; i < src_cnt; i++)
                        pq_coefs[i] = 1;
        } else
                goto err_srcs;

        thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
        if (!thread->srcs)
                goto err_srcs;
        for (i = 0; i < src_cnt; i++) {
                thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
                if (!thread->srcs[i])
                        goto err_srcbuf;
        }
        thread->srcs[i] = NULL;

        thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
        if (!thread->dsts)
                goto err_dsts;
        for (i = 0; i < dst_cnt; i++) {
                thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
                if (!thread->dsts[i])
                        goto err_dstbuf;
        }
        thread->dsts[i] = NULL;

        set_user_nice(current, 10);

        /*
         * src buffers are unmapped by the DMAEngine code with
         * dma_unmap_single(); dst buffers are unmapped by ourselves below
         */
        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
                | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;

        while (!kthread_should_stop()
               && !(iterations && total_tests >= iterations)) {
                struct dma_device *dev = chan->device;
                struct dma_async_tx_descriptor *tx = NULL;
                dma_addr_t dma_srcs[src_cnt];
                dma_addr_t dma_dsts[dst_cnt];
                u8 align = 0;

                total_tests++;

                /* honor alignment restrictions */
                if (thread->type == DMA_MEMCPY)
                        align = dev->copy_align;
                else if (thread->type == DMA_XOR)
                        align = dev->xor_align;
                else if (thread->type == DMA_PQ)
                        align = dev->pq_align;

                if (1 << align > test_buf_size) {
                        pr_err("%u-byte buffer too small for %d-byte alignment\n",
                               test_buf_size, 1 << align);
                        break;
                }

                len = dmatest_random() % test_buf_size + 1;
                len = (len >> align) << align;
                if (!len)
                        len = 1 << align;
                src_off = dmatest_random() % (test_buf_size - len + 1);
                dst_off = dmatest_random() % (test_buf_size - len + 1);

                src_off = (src_off >> align) << align;
                dst_off = (dst_off >> align) << align;
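                /*
                 * The shifts above round len and both offsets down to a
                 * multiple of (1 << align); e.g. with align = 2 (4-byte
                 * alignment), a random len of 1031 becomes 1028.
                 */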

                dmatest_init_srcs(thread->srcs, src_off, len);
                dmatest_init_dsts(thread->dsts, dst_off, len);

                for (i = 0; i < src_cnt; i++) {
                        u8 *buf = thread->srcs[i] + src_off;

                        dma_srcs[i] = dma_map_single(dev->dev, buf, len,
                                                     DMA_TO_DEVICE);
                }
                /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
                for (i = 0; i < dst_cnt; i++) {
                        dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
                                                     test_buf_size,
                                                     DMA_BIDIRECTIONAL);
                }

                if (thread->type == DMA_MEMCPY)
                        tx = dev->device_prep_dma_memcpy(chan,
                                                         dma_dsts[0] + dst_off,
                                                         dma_srcs[0], len,
                                                         flags);
                else if (thread->type == DMA_XOR)
                        tx = dev->device_prep_dma_xor(chan,
                                                      dma_dsts[0] + dst_off,
                                                      dma_srcs, src_cnt,
                                                      len, flags);
                else if (thread->type == DMA_PQ) {
                        dma_addr_t dma_pq[dst_cnt];

                        for (i = 0; i < dst_cnt; i++)
                                dma_pq[i] = dma_dsts[i] + dst_off;
                        tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
                                                     src_cnt, pq_coefs,
                                                     len, flags);
                }

                if (!tx) {
                        unmap_src(dev->dev, dma_srcs, len, src_cnt);
                        unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);
                        pr_warning("%s: #%u: prep error with src_off=0x%x "
                                        "dst_off=0x%x len=0x%x\n",
                                        thread_name, total_tests - 1,
                                        src_off, dst_off, len);
                        msleep(100);
                        failed_tests++;
                        continue;
                }

                done.done = false;
                tx->callback = dmatest_callback;
                tx->callback_param = &done;
                cookie = tx->tx_submit(tx);

                if (dma_submit_error(cookie)) {
                        pr_warning("%s: #%u: submit error %d with src_off=0x%x "
                                        "dst_off=0x%x len=0x%x\n",
                                        thread_name, total_tests - 1, cookie,
                                        src_off, dst_off, len);
                        msleep(100);
                        failed_tests++;
                        continue;
                }
                dma_async_issue_pending(chan);

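                /*
                 * A negative "timeout" parameter wraps to a huge unsigned
                 * value that msecs_to_jiffies() clamps to MAX_JIFFY_OFFSET,
                 * which is why -1 behaves as an effectively infinite timeout.
                 */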
                wait_event_freezable_timeout(done_wait, done.done,
                                             msecs_to_jiffies(timeout));

                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

                if (!done.done) {
                        /*
                         * We're leaving the timed out dma operation with
                         * dangling pointer to done_wait. To make this
                         * correct, we'll need to allocate wait_done for
                         * each test iteration and perform "who's gonna
                         * free it this time?" dancing. For now, just
                         * leave it dangling.
                         */
                        pr_warning("%s: #%u: test timed out\n",
                                   thread_name, total_tests - 1);
                        failed_tests++;
                        continue;
                } else if (status != DMA_SUCCESS) {
                        pr_warning("%s: #%u: got completion callback,"
                                   " but status is \'%s\'\n",
                                   thread_name, total_tests - 1,
                                   status == DMA_ERROR ? "error" : "in progress");
                        failed_tests++;
                        continue;
                }

                /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
                unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt);

                error_count = 0;

                pr_debug("%s: verifying source buffer...\n", thread_name);
                error_count += dmatest_verify(thread->srcs, 0, src_off,
                                0, PATTERN_SRC, true);
                error_count += dmatest_verify(thread->srcs, src_off,
                                src_off + len, src_off,
                                PATTERN_SRC | PATTERN_COPY, true);
                error_count += dmatest_verify(thread->srcs, src_off + len,
                                test_buf_size, src_off + len,
                                PATTERN_SRC, true);

                pr_debug("%s: verifying dest buffer...\n",
                                thread->task->comm);
                error_count += dmatest_verify(thread->dsts, 0, dst_off,
                                0, PATTERN_DST, false);
                error_count += dmatest_verify(thread->dsts, dst_off,
                                dst_off + len, src_off,
                                PATTERN_SRC | PATTERN_COPY, false);
                error_count += dmatest_verify(thread->dsts, dst_off + len,
                                test_buf_size, dst_off + len,
                                PATTERN_DST, false);

                if (error_count) {
                        pr_warning("%s: #%u: %u errors with "
                                "src_off=0x%x dst_off=0x%x len=0x%x\n",
                                thread_name, total_tests - 1, error_count,
                                src_off, dst_off, len);
                        failed_tests++;
                } else {
                        pr_debug("%s: #%u: No errors with "
                                "src_off=0x%x dst_off=0x%x len=0x%x\n",
                                thread_name, total_tests - 1,
                                src_off, dst_off, len);
                }
        }

        ret = 0;
        for (i = 0; thread->dsts[i]; i++)
                kfree(thread->dsts[i]);
err_dstbuf:
        kfree(thread->dsts);
err_dsts:
        for (i = 0; thread->srcs[i]; i++)
                kfree(thread->srcs[i]);
err_srcbuf:
        kfree(thread->srcs);
err_srcs:
        pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
                        thread_name, total_tests, failed_tests, ret);

        /* terminate all transfers on specified channels */
        chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
        if (iterations > 0)
                while (!kthread_should_stop()) {
                        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
                        interruptible_sleep_on(&wait_dmatest_exit);
                }

        return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
        struct dmatest_thread	*thread;
        struct dmatest_thread	*_thread;
        int			ret;

        list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
                ret = kthread_stop(thread->task);
                pr_debug("dmatest: thread %s exited with status %d\n",
                                thread->task->comm, ret);
                list_del(&thread->node);
                kfree(thread);
        }

        /* terminate all transfers on specified channels */
        dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);

        kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
{
        struct dmatest_thread *thread;
        struct dma_chan *chan = dtc->chan;
        char *op;
        unsigned int i;

        if (type == DMA_MEMCPY)
                op = "copy";
        else if (type == DMA_XOR)
                op = "xor";
        else if (type == DMA_PQ)
                op = "pq";
        else
                return -EINVAL;

        for (i = 0; i < threads_per_chan; i++) {
                thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
                if (!thread) {
                        pr_warning("dmatest: No memory for %s-%s%u\n",
                                   dma_chan_name(chan), op, i);
                        break;
                }
                thread->chan = dtc->chan;
                thread->type = type;
                smp_wmb();
                thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
                                dma_chan_name(chan), op, i);
                if (IS_ERR(thread->task)) {
                        pr_warning("dmatest: Failed to run thread %s-%s%u\n",
                                        dma_chan_name(chan), op, i);
                        kfree(thread);
                        break;
                }

                /* srcbuf and dstbuf are allocated by the thread itself */

                list_add_tail(&thread->node, &dtc->threads);
        }

        return i;
}

static int dmatest_add_channel(struct dma_chan *chan)
{
        struct dmatest_chan	*dtc;
        struct dma_device	*dma_dev = chan->device;
        unsigned int		thread_count = 0;
        int cnt;

        dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
        if (!dtc) {
                pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
                return -ENOMEM;
        }

        dtc->chan = chan;
        INIT_LIST_HEAD(&dtc->threads);

        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
                thread_count += cnt > 0 ? cnt : 0;
        }
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(dtc, DMA_XOR);
                thread_count += cnt > 0 ? cnt : 0;
        }
        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(dtc, DMA_PQ);
                thread_count += cnt > 0 ? cnt : 0;
        }

        pr_info("dmatest: Started %u threads using %s\n",
                thread_count, dma_chan_name(chan));

        list_add_tail(&dtc->node, &dmatest_channels);
        nr_channels++;

        return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
        return dmatest_match_channel(chan) && dmatest_match_device(chan->device);
}

static int __init dmatest_init(void)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        int err = 0;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        for (;;) {
                chan = dma_request_channel(mask, filter, NULL);
                if (chan) {
                        err = dmatest_add_channel(chan);
                        if (err) {
                                dma_release_channel(chan);
                                break; /* add_channel failed, punt */
                        }
                } else
                        break; /* no more channels available */
                if (max_channels && nr_channels >= max_channels)
                        break; /* we have all we need */
        }

        return err;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
        struct dmatest_chan *dtc, *_dtc;
        struct dma_chan *chan;

        list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
                list_del(&dtc->node);
                chan = dtc->chan;
                dmatest_cleanup_channel(dtc);
                pr_debug("dmatest: dropped channel %s\n",
                         dma_chan_name(chan));
                dma_release_channel(chan);
        }
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");
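
/*
 * Example usage (a sketch; the channel name varies by platform and the
 * parameters are those defined at the top of this file):
 *
 *   modprobe dmatest channel=dma0chan0 threads_per_chan=2 iterations=100
 *
 * Results are reported through the pr_*() calls above, i.e. in the
 * kernel log (dmesg).
 */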