/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-memset (default: 0)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");

static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");

static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");

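/*
 * Example invocation (illustrative only; the channel name below is a
 * platform-specific placeholder, not a default):
 *
 *	modprobe dmatest timeout=2000 iterations=5 channel=dma0chan0 run=1
 *
 * The same parameters can also be changed at runtime through
 * /sys/module/dmatest/parameters/ before starting a test with "run".
 */
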
/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 * @noverify:		disable data verification
 * @norandom:		disable random offset setup
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[32];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
	bool		noverify;
	bool		norandom;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @lock:		access protection to the fields of this structure
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
#define PATTERN_MEMSET_IDX	0x01

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**usrcs;
	u8			**dsts;
	u8			**udsts;
	enum dma_transaction_type type;
	wait_queue_head_t done_wait;
	struct dmatest_done test_done;
	bool			done;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
static bool wait;

static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

static int dmatest_wait_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (params->iterations)
		wait_event(thread_wait, !is_threaded_test_run(info));
	wait = true;
	return param_get_bool(val, kp);
}

static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

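/*
 * Per-byte pattern index: memcpy/xor/pq tests use the inverted low bits of
 * the byte offset, while memset tests use one fixed index so every byte of
 * the buffer carries the same expected value.
 */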
static inline u8 gen_inv_idx(u8 index, bool is_memset)
{
	u8 val = is_memset ? PATTERN_MEMSET_IDX : index;

	return ~val & PATTERN_COUNT_MASK;
}

static inline u8 gen_src_value(u8 index, bool is_memset)
{
	return PATTERN_SRC | gen_inv_idx(index, is_memset);
}

static inline u8 gen_dst_value(u8 index, bool is_memset)
{
	return PATTERN_DST | gen_inv_idx(index, is_memset);
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_src_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
		for ( ; i < buf_size; i++)
			buf[i] = gen_src_value(i, is_memset);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_dst_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_dst_value(i, is_memset) |
						PATTERN_OVERWRITE;
		for ( ; i < buf_size; i++)
			buf[i] = gen_dst_value(i, is_memset);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf, bool is_memset)
{
	u8 diff = actual ^ pattern;
	u8 expected = pattern | gen_inv_idx(counter, is_memset);
	const char *thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf, bool is_memset)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | gen_inv_idx(counter, is_memset);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf,
							 is_memset);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;
	struct dmatest_thread *thread =
		container_of(done, struct dmatest_thread, test_done);
	if (!thread->done) {
		done->done = true;
		wake_up_all(done->wait);
	} else {
		/*
		 * If thread->done, it means that this callback occurred
		 * after the parent thread has cleaned up. This can
		 * happen in the case that the driver doesn't implement
		 * the terminate_all() functionality and a dma operation
		 * did not occur within the timeout period.
		 */
		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
	}
}

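/* Return min(x, y), rounded down to the nearest odd value. */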
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

static void result(const char *err, unsigned int n, unsigned int src_off,
		   unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}

#define verbose_result(err, n, src_off, dst_off, len, data) ({	\
	if (verbose)						\
		result(err, n, src_off, dst_off, len, data);	\
	else							\
		dbg_result(err, n, src_off, dst_off, len, data);\
})

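/* Scale a count accumulated over @runtime microseconds to a per-second rate. */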
static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime <= 0)
		return 0;

	/* drop precision until runtime is 32-bits */
	while (runtime > UINT_MAX) {
		runtime >>= 1;
		per_sec <<= 1;
	}

	per_sec *= val;
	do_div(per_sec, runtime);
	return per_sec;
}

static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return dmatest_persec(runtime, len >> 10);
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	struct dmatest_thread	*thread = data;
	struct dmatest_done	*done = &thread->test_done;
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;
	ktime_t	ktime, start, diff;
	ktime_t	filltime = 0;
	ktime_t	comparetime = 0;
	s64	runtime = 0;
	unsigned long long	total_len = 0;
	u8	align = 0;
	bool	is_memset = false;

	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY) {
		align = dev->copy_align;
		src_cnt = dst_cnt = 1;
	} else if (thread->type == DMA_MEMSET) {
		align = dev->fill_align;
		src_cnt = dst_cnt = 1;
		is_memset = true;
	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
		align = dev->xor_align;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;
		align = dev->pq_align;

		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;

	thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->usrcs)
		goto err_usrcs;

	for (i = 0; i < src_cnt; i++) {
		thread->usrcs[i] = kmalloc(params->buf_size + align,
					   GFP_KERNEL);
		if (!thread->usrcs[i])
			goto err_srcbuf;

		/* align srcs to alignment restriction */
		if (align)
			thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
		else
			thread->srcs[i] = thread->usrcs[i];
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;

	thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->udsts)
		goto err_udsts;

	for (i = 0; i < dst_cnt; i++) {
		thread->udsts[i] = kmalloc(params->buf_size + align,
					   GFP_KERNEL);
		if (!thread->udsts[i])
			goto err_dstbuf;

		/* align dsts to alignment restriction */
		if (align)
			thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
		else
			thread->dsts[i] = thread->udsts[i];
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	ktime = ktime_get();
	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		struct dmaengine_unmap_data *um;
		dma_addr_t srcs[src_cnt];
		dma_addr_t *dsts;
		unsigned int src_off, dst_off, len;

		total_tests++;

		/* Check if buffer count fits into map count variable (u8) */
		if ((src_cnt + dst_cnt) >= 255) {
			pr_err("too many buffers (%d of 255 supported)\n",
			       src_cnt + dst_cnt);
			break;
		}

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

		if (params->norandom)
			len = params->buf_size;
		else
			len = dmatest_random() % params->buf_size + 1;

		len = (len >> align) << align;
		if (!len)
			len = 1 << align;

		total_len += len;

		if (params->norandom) {
			src_off = 0;
			dst_off = 0;
		} else {
			src_off = dmatest_random() % (params->buf_size - len + 1);
			dst_off = dmatest_random() % (params->buf_size - len + 1);

			src_off = (src_off >> align) << align;
			dst_off = (dst_off >> align) << align;
		}

		if (!params->noverify) {
			start = ktime_get();
			dmatest_init_srcs(thread->srcs, src_off, len,
					  params->buf_size, is_memset);
			dmatest_init_dsts(thread->dsts, dst_off, len,
					  params->buf_size, is_memset);

			diff = ktime_sub(ktime_get(), start);
			filltime = ktime_add(filltime, diff);
		}

		um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
					      GFP_KERNEL);
		if (!um) {
			failed_tests++;
			result("unmap data NULL", total_tests,
			       src_off, dst_off, len, ret);
			continue;
		}

		um->len = params->buf_size;
		for (i = 0; i < src_cnt; i++) {
			void *buf = thread->srcs[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
						   um->len, DMA_TO_DEVICE);
			srcs[i] = um->addr[i] + src_off;
			ret = dma_mapping_error(dev->dev, um->addr[i]);
			if (ret) {
				dmaengine_unmap_put(um);
				result("src mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
			um->to_cnt++;
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		dsts = &um->addr[src_cnt];
		for (i = 0; i < dst_cnt; i++) {
			void *buf = thread->dsts[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
					       DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dsts[i]);
			if (ret) {
				dmaengine_unmap_put(um);
				result("dst mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
			um->bidi_cnt++;
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dsts[0] + dst_off,
							 srcs[0], len, flags);
		else if (thread->type == DMA_MEMSET)
			tx = dev->device_prep_dma_memset(chan,
						dsts[0] + dst_off,
						*(thread->srcs[0] + src_off),
						len, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dsts[0] + dst_off,
						      srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			dmaengine_unmap_put(um);
			result("prep error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}

		done->done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			dmaengine_unmap_put(um);
			result("submit error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(thread->done_wait, done->done,
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done->done) {
			dmaengine_unmap_put(um);
			result("test timed out", total_tests, src_off, dst_off,
			       len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_COMPLETE) {
			dmaengine_unmap_put(um);
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src_off,
			       dst_off, len, ret);
			failed_tests++;
			continue;
		}

		dmaengine_unmap_put(um);

		if (params->noverify) {
			verbose_result("test passed", total_tests, src_off,
				       dst_off, len, 0);
			continue;
		}

		start = ktime_get();
		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true, is_memset);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true, is_memset);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true, is_memset);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false, is_memset);

		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false, is_memset);

		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false, is_memset);

		diff = ktime_sub(ktime_get(), start);
		comparetime = ktime_add(comparetime, diff);

		if (error_count) {
			result("data error", total_tests, src_off, dst_off,
			       len, error_count);
			failed_tests++;
		} else {
			verbose_result("test passed", total_tests, src_off,
				       dst_off, len, 0);
		}
	}
	ktime = ktime_sub(ktime_get(), ktime);
	ktime = ktime_sub(ktime, comparetime);
	ktime = ktime_sub(ktime, filltime);
	runtime = ktime_to_us(ktime);

	ret = 0;
err_dstbuf:
	for (i = 0; thread->udsts[i]; i++)
		kfree(thread->udsts[i]);
	kfree(thread->udsts);
err_udsts:
	kfree(thread->dsts);
err_dsts:
err_srcbuf:
	for (i = 0; thread->usrcs[i]; i++)
		kfree(thread->usrcs[i]);
	kfree(thread->usrcs);
err_usrcs:
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
		current->comm, total_tests, failed_tests,
		dmatest_persec(runtime, total_tests),
		dmatest_KBs(runtime, total_len), ret);

	/* terminate all transfers on specified channels */
	if (ret || failed_tests)
		dmaengine_terminate_all(chan);

	thread->done = true;
	wake_up(&thread_wait);

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		put_task_struct(thread->task);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_MEMSET)
		op = "set";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		thread->test_done.wait = &thread->done_wait;
		init_waitqueue_head(&thread->done_wait);
		smp_wmb();
		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to create thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */
		get_task_struct(thread->task);
		list_add_tail(&thread->node, &dtc->threads);
		wake_up_process(thread->task);
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		if (dmatest == 0) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		if (dmatest == 1) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

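/*
 * Channel filter for dma_request_channel(): accept only channels that match
 * the user-supplied channel and device names.
 */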
static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static void request_channels(struct dmatest_info *info,
			     enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

static void run_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;
	params->norandom = norandom;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_MEMSET);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_PQ);
}

static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void restart_threaded_test(struct dmatest_info *info, bool run)
{
	/* we might be called early to set run=, defer running until all
	 * parameters have been evaluated
	 */
	if (!info->did_init)
		return;

	/* Stop any running test first */
	stop_threaded_test(info);

	/* Run test with new parameters */
	run_threaded_test(info);
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (is_threaded_test_run(info)) {
		dmatest_run = true;
	} else {
		stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}

	if (is_threaded_test_run(info))
		ret = -EBUSY;
	else if (dmatest_run)
		restart_threaded_test(info, dmatest_run);

	mutex_unlock(&info->lock);

	return ret;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		run_threaded_test(info);
		mutex_unlock(&info->lock);
	}

	if (params->iterations && wait)
		wait_event(thread_wait, !is_threaded_test_run(info));

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");