/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, int, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

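/*
 * Example invocation (illustrative values; real channel and device names
 * depend on the platform's DMA controllers):
 *
 *	modprobe dmatest channel=dma0chan0 timeout=2000 iterations=100
 *
 * The same parameters can also be changed at run time through the debugfs
 * files set up in dmatest_register_dbgfs() below.
 */
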
/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f

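/*
 * For example, a source byte at offset 5 inside the region to be copied
 * is initialized to PATTERN_SRC | PATTERN_COPY | (~5 & PATTERN_COUNT_MASK)
 * = 0x80 | 0x40 | 0x1a = 0xda.
 */
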
struct dmatest_info;

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**dsts;
	enum dma_transaction_type type;
	bool			done;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @channels:		list of channels under test
 * @nr_channels:	number of channels under test
 * @lock:		access protection to the fields of this structure
 * @root:		debugfs root directory
 * @dbgfs_params:	test parameters staged through debugfs
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;

	/* debugfs related stuff */
	struct dentry		*root;
	struct dmatest_params	dbgfs_params;
};

static struct dmatest_info test_info;

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	get_random_bytes(&buf, sizeof(buf));
	return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	}
}

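/*
 * Classify a corrupted byte from its pattern bits: a source byte that
 * changed was overwritten by the engine; a destination byte whose
 * COPY/OVERWRITE bits are wrong was never written; a destination byte
 * that unexpectedly carries the source pattern was copied where it
 * should not have been.
 */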
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8		diff = actual ^ pattern;
	u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char	*thread_name = current->comm;

	if (is_srcbuf)
		pr_warning("%s: srcbuf[0x%x] overwritten!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warning("%s: dstbuf[0x%x] not copied!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warning("%s: dstbuf[0x%x] was copied!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
	else
		pr_warning("%s: dstbuf[0x%x] mismatch!"
				" Expected %02x, got %02x\n",
				thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < 32)
					dmatest_mismatch(actual, pattern, i,
							counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > 32)
		pr_warning("%s: %u errors suppressed\n",
			current->comm, error_count - 32);

	return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
		unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
		unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

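/*
 * With the identically-initialized source buffers used here, XOR-ing an
 * odd number of sources reproduces the source data itself (dst == src),
 * which is what the verification step expects. For example,
 * min_odd(8, 5) == 5 and min_odd(4, 6) == 3.
 */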
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread	*thread = data;
	struct dmatest_done	done = { .wait = &done_wait };
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	const char		*thread_name;
	unsigned int		src_off, dst_off, len;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;

	thread_name = current->comm;
	set_freezable();

	ret = -ENOMEM;

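	/* pairs with the smp_wmb() in dmatest_add_threads() */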
	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;

		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src buffers are unmapped by the DMAEngine code with
	 * dma_unmap_single(); dst buffers are unmapped by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
		| DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;

	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

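		/*
		 * Pick a random transfer length and independent source and
		 * destination offsets, then round them down to the device's
		 * alignment: (x >> align) << align clears the low bits. A
		 * zero length after rounding is bumped up to one aligned unit.
		 */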
		len = dmatest_random() % params->buf_size + 1;
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
		src_off = dmatest_random() % (params->buf_size - len + 1);
		dst_off = dmatest_random() % (params->buf_size - len + 1);

		src_off = (src_off >> align) << align;
		dst_off = (dst_off >> align) << align;

		dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
		dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);

		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, i);
				pr_warn("%s: #%u: mapping error %d with "
					"src_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1, ret,
					src_off, len);
				failed_tests++;
				continue;
			}
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     params->buf_size,
						     DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, src_cnt);
				unmap_dst(dev->dev, dma_dsts, params->buf_size,
					  i);
				pr_warn("%s: #%u: mapping error %d with "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1, ret,
					dst_off, params->buf_size);
				failed_tests++;
				continue;
			}
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			unmap_src(dev->dev, dma_srcs, len, src_cnt);
			unmap_dst(dev->dev, dma_dsts, params->buf_size,
				  dst_cnt);
			pr_warning("%s: #%u: prep error with src_off=0x%x "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1,
					src_off, dst_off, len);
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			pr_warning("%s: #%u: submit error %d with src_off=0x%x "
					"dst_off=0x%x len=0x%x\n",
					thread_name, total_tests - 1, cookie,
					src_off, dst_off, len);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait,
					     done.done || kthread_should_stop(),
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * dangling pointer to done_wait. To make this
			 * correct, we'll need to allocate wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing. For now, just
			 * leave it dangling.
			 */
			pr_warning("%s: #%u: test timed out\n",
				   thread_name, total_tests - 1);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			pr_warning("%s: #%u: got completion callback,"
				   " but status is \'%s\'\n",
				   thread_name, total_tests - 1,
				   status == DMA_ERROR ? "error" : "in progress");
			failed_tests++;
			continue;
		}

		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

		error_count = 0;

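		/*
		 * Verify all three regions of each buffer: the area before
		 * the transfer window must still hold the plain init pattern,
		 * the window itself the copied pattern, and the tail must be
		 * untouched.
		 */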
		pr_debug("%s: verifying source buffer...\n", thread_name);
		error_count += dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n",
				thread->task->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false);

		if (error_count) {
			pr_warning("%s: #%u: %u errors with "
				"src_off=0x%x dst_off=0x%x len=0x%x\n",
				thread_name, total_tests - 1, error_count,
				src_off, dst_off, len);
			failed_tests++;
		} else {
			pr_debug("%s: #%u: No errors with "
				"src_off=0x%x dst_off=0x%x len=0x%x\n",
				thread_name, total_tests - 1,
				src_off, dst_off, len);
		}
	}

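	/*
	 * The error labels below double as the normal unwind path: the
	 * success case falls through them with ret == 0, freeing the
	 * buffers allocated above in reverse order.
	 */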
	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
			thread_name, total_tests, failed_tests, ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	thread->done = true;

	if (params->iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("dmatest: thread %s exited with status %d\n",
				thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warning("dmatest: No memory for %s-%s%u\n",
				   dma_chan_name(chan), op, i);

			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
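		/*
		 * Publish the fields set above before the new kthread can
		 * observe them; paired with the smp_rmb() in dmatest_func().
		 */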
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
					dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("dmatest: Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static int __run_threaded_test(struct dmatest_info *info)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dmatest_params *params = &info->params;
	int err = 0;

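	/*
	 * Channels are requested by DMA_MEMCPY capability only; if a grabbed
	 * channel also advertises XOR or PQ, dmatest_add_channel() starts
	 * threads for those operation types too.
	 */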
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	for (;;) {
		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			err = dmatest_add_channel(info, chan);
			if (err) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
	return err;
}

#ifndef MODULE
static int run_threaded_test(struct dmatest_info *info)
{
	int ret;

	mutex_lock(&info->lock);
	ret = __run_threaded_test(info);
	mutex_unlock(&info->lock);
	return ret;
}
#endif

static void __stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void stop_threaded_test(struct dmatest_info *info)
{
	mutex_lock(&info->lock);
	__stop_threaded_test(info);
	mutex_unlock(&info->lock);
}

static int __restart_threaded_test(struct dmatest_info *info, bool run)
{
	struct dmatest_params *params = &info->params;
	int ret;

	/* Stop any running test first */
	__stop_threaded_test(info);

	if (run == false)
		return 0;

	/* Copy test parameters */
	memcpy(params, &info->dbgfs_params, sizeof(*params));

	/* Run test with new parameters */
	ret = __run_threaded_test(info);
	if (ret) {
		__stop_threaded_test(info);
		pr_err("dmatest: Can't run test\n");
	}

	return ret;
}

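/*
 * debugfs interface. With debugfs mounted at /sys/kernel/debug (the usual
 * location), parameters can be changed and the test restarted at run time,
 * e.g.:
 *
 *	echo dma0chan0 > /sys/kernel/debug/dmatest/channel
 *	echo 1 > /sys/kernel/debug/dmatest/run
 *
 * ("dma0chan0" is an illustrative name; actual channel names depend on the
 * platform.) Reading "run" reports Y while any test thread is still active.
 */
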
static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
		const void __user *from, size_t count)
{
	char tmp[20];
	ssize_t len;

	len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
	if (len >= 0) {
		tmp[len] = '\0';
		strlcpy(to, strim(tmp), available);
	}

	return len;
}

static ssize_t dtf_read_channel(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	return simple_read_from_buffer(buf, count, ppos,
			info->dbgfs_params.channel,
			strlen(info->dbgfs_params.channel));
}

static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
		size_t size, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	return dtf_write_string(info->dbgfs_params.channel,
			sizeof(info->dbgfs_params.channel),
			ppos, buf, size);
}

static const struct file_operations dtf_channel_fops = {
	.read	= dtf_read_channel,
	.write	= dtf_write_channel,
	.open	= simple_open,
	.llseek	= default_llseek,
};

static ssize_t dtf_read_device(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	return simple_read_from_buffer(buf, count, ppos,
			info->dbgfs_params.device,
			strlen(info->dbgfs_params.device));
}

static ssize_t dtf_write_device(struct file *file, const char __user *buf,
		size_t size, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	return dtf_write_string(info->dbgfs_params.device,
			sizeof(info->dbgfs_params.device),
			ppos, buf, size);
}

static const struct file_operations dtf_device_fops = {
	.read	= dtf_read_device,
	.write	= dtf_write_device,
	.open	= simple_open,
	.llseek	= default_llseek,
};

static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	char buf[3];
	struct dmatest_chan *dtc;
	bool alive = false;

	mutex_lock(&info->lock);
	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done) {
				alive = true;
				break;
			}
		}
	}

	if (alive) {
		buf[0] = 'Y';
	} else {
		__stop_threaded_test(info);
		buf[0] = 'N';
	}

	mutex_unlock(&info->lock);
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	char buf[16];
	bool bv;
	int ret = 0;

	if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
		return -EFAULT;

	if (strtobool(buf, &bv) == 0) {
		mutex_lock(&info->lock);
		ret = __restart_threaded_test(info, bv);
		mutex_unlock(&info->lock);
	}

	return ret ? ret : count;
}

static const struct file_operations dtf_run_fops = {
	.read	= dtf_read_run,
	.write	= dtf_write_run,
	.open	= simple_open,
	.llseek	= default_llseek,
};

static int dmatest_register_dbgfs(struct dmatest_info *info)
{
	struct dentry *d;
	struct dmatest_params *params = &info->dbgfs_params;
	int ret = -ENOMEM;

	d = debugfs_create_dir("dmatest", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);
	if (!d)
		goto err_root;

	info->root = d;

	/* Copy initial values */
	memcpy(params, &info->params, sizeof(*params));

	/* Test parameters */

	d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->buf_size);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
				info, &dtf_channel_fops);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
				info, &dtf_device_fops);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO,
			       info->root, (u32 *)&params->threads_per_chan);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->max_channels);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->iterations);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->xor_sources);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->pq_sources);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
			       (u32 *)&params->timeout);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	/* Run or stop threaded test */
	d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
				info, &dtf_run_fops);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	return 0;

err_node:
	debugfs_remove_recursive(info->root);
err_root:
	pr_err("dmatest: Failed to initialize debugfs\n");
	return ret;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;
	int ret;

	memset(info, 0, sizeof(*info));

	mutex_init(&info->lock);
	INIT_LIST_HEAD(&info->channels);

	/* Set default parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, test_channel, sizeof(params->channel));
	strlcpy(params->device, test_device, sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;

	ret = dmatest_register_dbgfs(info);
	if (ret)
		return ret;

#ifdef MODULE
	return 0;
#else
	return run_threaded_test(info);
#endif
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	debugfs_remove_recursive(info->root);
	stop_threaded_test(info);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");