/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int sg_buffers = 1;
module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sg_buffers,
		"Number of scatter gather buffers (default: 1)");

static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-slave_sg (default: 0)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable random data setup and verification");

static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");

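/*
 * Example usage (hypothetical channel name): run a single verified
 * iteration on one channel and stop:
 *
 *	modprobe dmatest channel=dma0chan0 iterations=1 run=1
 *
 * Since the parameters above are declared with S_IWUSR, they can also
 * be changed at run time through /sys/module/dmatest/parameters/.
 */
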
/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 * @noverify:		disable random data setup and verification
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[32];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
	bool		noverify;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @channels:		channels under test
 * @nr_channels:	number of channels under test
 * @lock:		access protection to the fields of this structure
 * @did_init:		module init has run; userspace may toggle "run"
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");
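
/* E.g. (re)start the test from userspace once parameters are set:
 *	echo 1 > /sys/module/dmatest/parameters/run
 */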

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
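
/*
 * Worked example (illustrative, not part of the original source): the
 * source byte at index i = 2, when it falls inside the copied window,
 * is PATTERN_SRC | PATTERN_COPY | (~2 & PATTERN_COUNT_MASK)
 *	= 0x80 | 0x40 | 0x1d = 0xdd,
 * and after a correct transfer the destination holds the same 0xdd at
 * the corresponding offset, while untouched destination bytes keep
 * PATTERN_DST | (~i & PATTERN_COUNT_MASK), i.e. bit 7 clear.
 */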

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**usrcs;	/* unaligned srcs, as returned by kmalloc() */
	u8			**dsts;
	u8			**udsts;	/* unaligned dsts, as returned by kmalloc() */
	enum dma_transaction_type type;
	bool			done;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
static bool wait;

static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

static int dmatest_wait_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (params->iterations)
		wait_event(thread_wait, !is_threaded_test_run(info));
	wait = true;
	return param_get_bool(val, kp);
}

static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
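
/* Reading the parameter blocks until a bounded (iterations != 0) run
 * has finished, e.g. (hypothetical invocation):
 *	cat /sys/module/dmatest/parameters/wait
 */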

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8 diff = actual ^ pattern;
	u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char *thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

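/*
 * Largest odd number not exceeding min(x, y). The XOR and PQ tests
 * force an odd source count: all sources carry the identical pattern,
 * and XORing it an odd number of times reproduces it (dst == src),
 * while an even count would cancel out to zero.
 */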
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

static void result(const char *err, unsigned int n, unsigned int src_off,
		   unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}

#define verbose_result(err, n, src_off, dst_off, len, data) ({	\
	if (verbose)						\
		result(err, n, src_off, dst_off, len, data);	\
	else							\
		dbg_result(err, n, src_off, dst_off, len, data);\
})

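/*
 * Scale a count to a per-second rate for a runtime given in
 * microseconds: halve the runtime (doubling the us-per-second factor
 * to compensate) until it fits the 32-bit divisor that do_div()
 * expects.
 */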
static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime <= 0)
		return 0;

	/* drop precision until runtime is 32-bits */
	while (runtime > UINT_MAX) {
		runtime >>= 1;
		per_sec <<= 1;
	}

	per_sec *= val;
	do_div(per_sec, runtime);
	return per_sec;
}

static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return dmatest_persec(runtime, len >> 10);
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
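/*
 * Each iteration below follows the same sequence: pick a (possibly
 * random) length and offsets, fill the buffers with the patterns above,
 * DMA-map them, prep and submit the descriptor, wait for the completion
 * callback (or a timeout), then verify every byte of both buffers
 * unless "noverify" is set.
 */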
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread	*thread = data;
	struct dmatest_done	done = { .wait = &done_wait };
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;
	ktime_t			ktime, start, diff;
	ktime_t			filltime = 0;
	ktime_t			comparetime = 0;
	s64			runtime = 0;
	unsigned long long	total_len = 0;
	u8			align = 0;

	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY) {
		align = dev->copy_align;
		src_cnt = dst_cnt = 1;
	} else if (thread->type == DMA_SG) {
		align = dev->copy_align;
		src_cnt = dst_cnt = sg_buffers;
	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
		align = dev->xor_align;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;
		align = dev->pq_align;

		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;

	thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->usrcs)
		goto err_usrcs;

	for (i = 0; i < src_cnt; i++) {
		thread->usrcs[i] = kmalloc(params->buf_size + align,
					   GFP_KERNEL);
		if (!thread->usrcs[i])
			goto err_srcbuf;

		/* align srcs to alignment restriction */
		if (align)
			thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
		else
			thread->srcs[i] = thread->usrcs[i];
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;

	thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->udsts)
		goto err_udsts;

	for (i = 0; i < dst_cnt; i++) {
		thread->udsts[i] = kmalloc(params->buf_size + align,
					   GFP_KERNEL);
		if (!thread->udsts[i])
			goto err_dstbuf;

		/* align dsts to alignment restriction */
		if (align)
			thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
		else
			thread->dsts[i] = thread->udsts[i];
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	ktime = ktime_get();
	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		struct dmaengine_unmap_data *um;
		dma_addr_t srcs[src_cnt];
		dma_addr_t *dsts;
		unsigned int src_off, dst_off, len;
		struct scatterlist tx_sg[src_cnt];
		struct scatterlist rx_sg[src_cnt];

		total_tests++;

		/* Check if buffer count fits into map count variable (u8) */
		if ((src_cnt + dst_cnt) >= 255) {
			pr_err("too many buffers (%d of 255 supported)\n",
			       src_cnt + dst_cnt);
			break;
		}

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

		if (params->noverify)
			len = params->buf_size;
		else
			len = dmatest_random() % params->buf_size + 1;

		len = (len >> align) << align;
		if (!len)
			len = 1 << align;

		total_len += len;

		if (params->noverify) {
			src_off = 0;
			dst_off = 0;
		} else {
			start = ktime_get();
			src_off = dmatest_random() % (params->buf_size - len + 1);
			dst_off = dmatest_random() % (params->buf_size - len + 1);

			src_off = (src_off >> align) << align;
			dst_off = (dst_off >> align) << align;

			dmatest_init_srcs(thread->srcs, src_off, len,
					  params->buf_size);
			dmatest_init_dsts(thread->dsts, dst_off, len,
					  params->buf_size);

			diff = ktime_sub(ktime_get(), start);
			filltime = ktime_add(filltime, diff);
		}

		um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
					      GFP_KERNEL);
		if (!um) {
			failed_tests++;
			result("unmap data NULL", total_tests,
			       src_off, dst_off, len, ret);
			continue;
		}

		um->len = params->buf_size;
		for (i = 0; i < src_cnt; i++) {
			void *buf = thread->srcs[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
						   um->len, DMA_TO_DEVICE);
			srcs[i] = um->addr[i] + src_off;
			ret = dma_mapping_error(dev->dev, um->addr[i]);
			if (ret) {
				dmaengine_unmap_put(um);
				result("src mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
			um->to_cnt++;
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		dsts = &um->addr[src_cnt];
		for (i = 0; i < dst_cnt; i++) {
			void *buf = thread->dsts[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
					       DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dsts[i]);
			if (ret) {
				dmaengine_unmap_put(um);
				result("dst mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
			um->bidi_cnt++;
		}

		sg_init_table(tx_sg, src_cnt);
		sg_init_table(rx_sg, src_cnt);
		for (i = 0; i < src_cnt; i++) {
			sg_dma_address(&rx_sg[i]) = srcs[i];
			sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
			sg_dma_len(&tx_sg[i]) = len;
			sg_dma_len(&rx_sg[i]) = len;
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dsts[0] + dst_off,
							 srcs[0], len, flags);
		else if (thread->type == DMA_SG)
			tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
						     rx_sg, src_cnt, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dsts[0] + dst_off,
						      srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			dmaengine_unmap_put(um);
			result("prep error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			dmaengine_unmap_put(um);
			result("submit error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait, done.done,
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * dangling pointer to done_wait. To make this
			 * correct, we'll need to allocate wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing. For now, just
			 * leave it dangling.
			 */
			dmaengine_unmap_put(um);
			result("test timed out", total_tests, src_off, dst_off,
			       len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_COMPLETE) {
			dmaengine_unmap_put(um);
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src_off,
			       dst_off, len, ret);
			failed_tests++;
			continue;
		}

		dmaengine_unmap_put(um);

		if (params->noverify) {
			verbose_result("test passed", total_tests, src_off,
				       dst_off, len, 0);
			continue;
		}

		start = ktime_get();
		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false);

		diff = ktime_sub(ktime_get(), start);
		comparetime = ktime_add(comparetime, diff);

		if (error_count) {
			result("data error", total_tests, src_off, dst_off,
			       len, error_count);
			failed_tests++;
		} else {
			verbose_result("test passed", total_tests, src_off,
				       dst_off, len, 0);
		}
	}
	ktime = ktime_sub(ktime_get(), ktime);
	ktime = ktime_sub(ktime, comparetime);
	ktime = ktime_sub(ktime, filltime);
	runtime = ktime_to_us(ktime);

	ret = 0;
err_dstbuf:
	for (i = 0; thread->udsts[i]; i++)
		kfree(thread->udsts[i]);
	kfree(thread->udsts);
err_udsts:
	kfree(thread->dsts);
err_dsts:
err_srcbuf:
	for (i = 0; thread->usrcs[i]; i++)
		kfree(thread->usrcs[i]);
	kfree(thread->usrcs);
err_usrcs:
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
		current->comm, total_tests, failed_tests,
		dmatest_persec(runtime, total_tests),
		dmatest_KBs(runtime, total_len), ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	thread->done = true;
	wake_up(&thread_wait);

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		put_task_struct(thread->task);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_SG)
		op = "sg";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		smp_wmb();
		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to create thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */
		get_task_struct(thread->task);
		list_add_tail(&thread->node, &dtc->threads);
		wake_up_process(thread->task);
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		if (dmatest == 0) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
		if (dmatest == 1) {
			cnt = dmatest_add_threads(info, dtc, DMA_SG);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static void request_channels(struct dmatest_info *info,
			     enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

static void run_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_SG);
	request_channels(info, DMA_PQ);
}

static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void restart_threaded_test(struct dmatest_info *info, bool run)
{
	/* we might be called early to set run=, defer running until all
	 * parameters have been evaluated
	 */
	if (!info->did_init)
		return;

	/* Stop any running test first */
	stop_threaded_test(info);

	/* Run test with new parameters */
	run_threaded_test(info);
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (is_threaded_test_run(info)) {
		dmatest_run = true;
	} else {
		stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}

	if (is_threaded_test_run(info))
		ret = -EBUSY;
	else if (dmatest_run)
		restart_threaded_test(info, dmatest_run);

	mutex_unlock(&info->lock);

	return ret;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		run_threaded_test(info);
		mutex_unlock(&info->lock);
	}

	if (params->iterations && wait)
		wait_event(thread_wait, !is_threaded_test_run(info));

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");