/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		"Pass -1 for infinite timeout");

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @lock:		access protection to the fields of this structure
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");
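
/*
 * Example invocation (illustrative values only; any of the parameters
 * declared above can be combined the same way):
 *
 *	modprobe dmatest timeout=2000 iterations=1 channel=dma0chan0 run=1
 *
 * "run" can also be toggled after loading via
 * /sys/module/dmatest/parameters/run.
 */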

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
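
/*
 * Worked example: the source byte at index 5 inside the region to be
 * copied is initialized to PATTERN_SRC | PATTERN_COPY |
 * (~5 & PATTERN_COUNT_MASK) = 0x80 | 0x40 | 0x1a = 0xda, while the
 * destination byte at the same index outside the overwrite window
 * starts as PATTERN_DST | 0x1a = 0x1a.
 */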

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**dsts;
	enum dma_transaction_type type;
	bool			done;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	get_random_bytes(&buf, sizeof(buf));
	return buf;
}

182
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200183static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
184 unsigned int buf_size)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700185{
186 unsigned int i;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700187 u8 *buf;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700188
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700189 for (; (buf = *bufs); bufs++) {
190 for (i = 0; i < start; i++)
191 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
192 for ( ; i < start + len; i++)
193 buf[i] = PATTERN_SRC | PATTERN_COPY
Joe Perchesc0198942009-06-28 09:26:21 -0700194 | (~i & PATTERN_COUNT_MASK);
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200195 for ( ; i < buf_size; i++)
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700196 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
197 buf++;
198 }
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700199}
200
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200201static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
202 unsigned int buf_size)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700203{
204 unsigned int i;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700205 u8 *buf;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700206
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700207 for (; (buf = *bufs); bufs++) {
208 for (i = 0; i < start; i++)
209 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
210 for ( ; i < start + len; i++)
211 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
212 | (~i & PATTERN_COUNT_MASK);
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200213 for ( ; i < buf_size; i++)
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700214 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
215 }
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700216}
217
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8 diff = actual ^ pattern;
	u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char *thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
		unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
		unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

/* Return the smaller of x and y, rounded down to an odd value */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

static void result(const char *err, unsigned int n, unsigned int src_off,
		unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		unsigned int dst_off, unsigned int len,
		unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread *thread = data;
	struct dmatest_done done = { .wait = &done_wait };
	struct dmatest_info *info;
	struct dmatest_params *params;
	struct dma_chan *chan;
	struct dma_device *dev;
	unsigned int src_off, dst_off, len;
	unsigned int error_count;
	unsigned int failed_tests = 0;
	unsigned int total_tests = 0;
	dma_cookie_t cookie;
	enum dma_status status;
	enum dma_ctrl_flags flags;
	u8 *pq_coefs = NULL;
	int ret;
	int src_cnt;
	int dst_cnt;
	int i;

	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;

		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

		len = dmatest_random() % params->buf_size + 1;
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
		src_off = dmatest_random() % (params->buf_size - len + 1);
		dst_off = dmatest_random() % (params->buf_size - len + 1);

		src_off = (src_off >> align) << align;
		dst_off = (dst_off >> align) << align;

		dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
		dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);

		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, i);
				result("src mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     params->buf_size,
						     DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, src_cnt);
				unmap_dst(dev->dev, dma_dsts, params->buf_size,
					  i);
				result("dst mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			unmap_src(dev->dev, dma_srcs, len, src_cnt);
			unmap_dst(dev->dev, dma_dsts, params->buf_size,
				  dst_cnt);
			result("prep error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			result("submit error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait, done.done,
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * dangling pointer to done_wait. To make this
			 * correct, we'll need to allocate wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing. For now, just
			 * leave it dangling.
			 */
			result("test timed out", total_tests, src_off, dst_off,
			       len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src_off,
			       dst_off, len, ret);
			failed_tests++;
			continue;
		}

		/* Unmap the buffers ourselves */
		unmap_src(dev->dev, dma_srcs, len, src_cnt);
		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

		error_count = 0;

		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count += dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false);

		if (error_count) {
			result("data error", total_tests, src_off, dst_off,
			       len, error_count);
			failed_tests++;
		} else {
			dbg_result("test passed", total_tests, src_off, dst_off,
				   len, 0);
		}
	}

	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_info("%s: terminating after %u tests, %u failures (status %d)\n",
		current->comm, total_tests, failed_tests, ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	thread->done = true;

	if (params->iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread *thread;
	struct dmatest_thread *_thread;
	int ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to run thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static void request_channels(struct dmatest_info *info,
			     enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

static void run_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_PQ);
}

static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void restart_threaded_test(struct dmatest_info *info, bool run)
{
	/* we might be called early to set run=, defer running until all
	 * parameters have been evaluated
	 */
	if (!info->did_init)
		return;

	/* Stop any running test first */
	stop_threaded_test(info);

	/* Run test with new parameters */
	run_threaded_test(info);
}

static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (is_threaded_test_run(info)) {
		dmatest_run = true;
	} else {
		stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}

	if (is_threaded_test_run(info))
		ret = -EBUSY;
	else if (dmatest_run)
		restart_threaded_test(info, dmatest_run);

	mutex_unlock(&info->lock);

	return ret;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		run_threaded_test(info);
		mutex_unlock(&info->lock);
	}

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");