/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable random data setup and verification");
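/*
 * Example usage (illustrative note, not part of the original file):
 *
 *	% modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1
 *
 * With the module built in, the same parameters can be given on the kernel
 * command line or changed at runtime under /sys/module/dmatest/parameters/.
 */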

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 * @noverify:		disable random data setup and verification
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
	bool		noverify;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @lock:		access protection to the fields of this structure
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
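/*
 * Illustrative example (added note, not used by the code): with this scheme
 * the source byte at offset i inside the region to be copied is
 * PATTERN_SRC | PATTERN_COPY | (~i & PATTERN_COUNT_MASK); e.g. i = 2 gives
 * 0x80 | 0x40 | 0x1d = 0xdd, while the matching destination byte starts out
 * as PATTERN_DST | PATTERN_OVERWRITE | 0x1d = 0x3d.
 */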

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**dsts;
	enum dma_transaction_type type;
	bool			done;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8		diff = actual ^ pattern;
	u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char	*thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
			     unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
			     unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

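/*
 * Added note: the xor/pq paths below want an odd number of identical source
 * buffers so that xor-ing all sources reproduces a single source pattern,
 * keeping the destination verifiable against PATTERN_SRC | PATTERN_COPY.
 */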
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

static void result(const char *err, unsigned int n, unsigned int src_off,
		   unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}

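/*
 * Added note: runtime below is measured in microseconds, so starting per_sec
 * at 10^6 makes per_sec * val / runtime the number of "val" events per
 * second.  Halving runtime while doubling per_sec preserves that ratio and
 * shrinks runtime until it fits the 32-bit divisor that do_div() requires.
 */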
static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime <= 0)
		return 0;

	/* drop precision until runtime is 32-bits */
	while (runtime > UINT_MAX) {
		runtime >>= 1;
		per_sec <<= 1;
	}

	per_sec *= val;
	do_div(per_sec, runtime);
	return per_sec;
}

static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return dmatest_persec(runtime, len >> 10);
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread	*thread = data;
	struct dmatest_done	done = { .wait = &done_wait };
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	unsigned int		src_off, dst_off, len;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;
	ktime_t			ktime;
	s64			runtime = 0;
	unsigned long long	total_len = 0;

	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;

		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	ktime = ktime_get();
	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

		if (params->noverify) {
			len = params->buf_size;
			src_off = 0;
			dst_off = 0;
		} else {
			len = dmatest_random() % params->buf_size + 1;
			len = (len >> align) << align;
			if (!len)
				len = 1 << align;
			src_off = dmatest_random() % (params->buf_size - len + 1);
			dst_off = dmatest_random() % (params->buf_size - len + 1);

			src_off = (src_off >> align) << align;
			dst_off = (dst_off >> align) << align;

			dmatest_init_srcs(thread->srcs, src_off, len,
					  params->buf_size);
			dmatest_init_dsts(thread->dsts, dst_off, len,
					  params->buf_size);
		}

		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
		total_len += len;

		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, i);
				result("src mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     params->buf_size,
						     DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, src_cnt);
				unmap_dst(dev->dev, dma_dsts, params->buf_size,
					  i);
				result("dst mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			unmap_src(dev->dev, dma_srcs, len, src_cnt);
			unmap_dst(dev->dev, dma_dsts, params->buf_size,
				  dst_cnt);
			result("prep error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			result("submit error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait, done.done,
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * dangling pointer to done_wait. To make this
			 * correct, we'll need to allocate wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing. For now, just
			 * leave it dangling.
			 */
			result("test timed out", total_tests, src_off, dst_off,
			       len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src_off,
			       dst_off, len, ret);
			failed_tests++;
			continue;
		}

		/* Unmap by myself */
		unmap_src(dev->dev, dma_srcs, len, src_cnt);
		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

		if (params->noverify) {
			dbg_result("test passed", total_tests, src_off, dst_off,
				   len, 0);
			continue;
		}

		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false);

		if (error_count) {
			result("data error", total_tests, src_off, dst_off,
			       len, error_count);
			failed_tests++;
		} else {
			dbg_result("test passed", total_tests, src_off, dst_off,
				   len, 0);
		}
	}
	runtime = ktime_us_delta(ktime_get(), ktime);

	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
		current->comm, total_tests, failed_tests,
		dmatest_persec(runtime, total_tests),
		dmatest_KBs(runtime, total_len), ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	thread->done = true;

	if (params->iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to run thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static void request_channels(struct dmatest_info *info,
			     enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

static void run_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_PQ);
}

static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void restart_threaded_test(struct dmatest_info *info, bool run)
{
	/* we might be called early to set run=, defer running until all
	 * parameters have been evaluated
	 */
	if (!info->did_init)
		return;

	/* Stop any running test first */
	stop_threaded_test(info);

	/* Run test with new parameters */
	run_threaded_test(info);
}

static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (is_threaded_test_run(info)) {
		dmatest_run = true;
	} else {
		stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}

	if (is_threaded_test_run(info))
		ret = -EBUSY;
	else if (dmatest_run)
		restart_threaded_test(info, dmatest_run);

	mutex_unlock(&info->lock);

	return ret;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		run_threaded_test(info);
		mutex_unlock(&info->lock);
	}

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");