/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

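/*
 * Typical usage (illustrative; any subset of the parameters above works,
 * and they remain writable under /sys/module/dmatest/parameters/ while
 * the module is loaded):
 *
 *	modprobe dmatest channel=dma0chan0 iterations=100 timeout=2000
 *
 * The channel and device names depend on the platform's DMA controller.
 */
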
/* Maximum number of mismatched bytes in the buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f

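/*
 * Worked example (illustrative): byte 5 of a source buffer, inside the
 * region that is to be copied, is initialized to
 *
 *	PATTERN_SRC | PATTERN_COPY | (~5 & PATTERN_COUNT_MASK)
 *	= 0x80 | 0x40 | 0x1a = 0xda
 *
 * so the verifier can tell from a corrupted byte's value exactly which
 * region and offset it was supposed to belong to.
 */
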
enum dmatest_error_type {
	DMATEST_ET_OK,
	DMATEST_ET_MAP_SRC,
	DMATEST_ET_MAP_DST,
	DMATEST_ET_PREP,
	DMATEST_ET_SUBMIT,
	DMATEST_ET_TIMEOUT,
	DMATEST_ET_DMA_ERROR,
	DMATEST_ET_DMA_IN_PROGRESS,
	DMATEST_ET_VERIFY,
};

struct dmatest_thread_result {
	struct list_head	node;
	unsigned int		n;
	unsigned int		src_off;
	unsigned int		dst_off;
	unsigned int		len;
	enum dmatest_error_type type;
	union {
		unsigned long	data;
		dma_cookie_t	cookie;
		enum dma_status	status;
		int		error;
	};
};

struct dmatest_result {
	struct list_head	node;
	char			*name;
	struct list_head	results;
};

struct dmatest_info;

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**dsts;
	enum dma_transaction_type type;
	bool			done;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @lock:		access protection to the fields of this structure
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;

	/* debugfs related stuff */
	struct dentry		*root;

	/* Test results */
	struct list_head	results;
	struct mutex		results_lock;
};

static struct dmatest_info test_info;

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	get_random_bytes(&buf, sizeof(buf));
	return buf;
}

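/*
 * Fill every source buffer with PATTERN_SRC bytes, additionally tagging
 * the [start, start + len) window that the DMA engine is expected to
 * read with PATTERN_COPY.  dmatest_init_dsts() below does the same for
 * the destination buffers using PATTERN_DST/PATTERN_OVERWRITE.
 */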
static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8		diff = actual ^ pattern;
	u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char	*thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

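/*
 * Compare [start, end) of each buffer against the expected pattern,
 * reporting at most MAX_ERROR_COUNT mismatches in detail and returning
 * the total number of bad bytes found.
 */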
static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
			     unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
			     unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

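/*
 * Round down to the nearest odd value not larger than either argument.
 * Since every source buffer is filled with the same pattern, the xor of
 * an odd number of sources equals a single source (pairs cancel out),
 * which is what the "force odd to ensure dst = src" callers below rely
 * on when verifying the destination.
 */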
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

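/*
 * Render one result record as text.  Note that the returned pointer is
 * to a shared static buffer, so the string must be consumed (printed or
 * copied) before the next call.
 */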
static char *thread_result_get(const char *name,
		struct dmatest_thread_result *tr)
{
	static const char * const messages[] = {
		[DMATEST_ET_OK]			= "No errors",
		[DMATEST_ET_MAP_SRC]		= "src mapping error",
		[DMATEST_ET_MAP_DST]		= "dst mapping error",
		[DMATEST_ET_PREP]		= "prep error",
		[DMATEST_ET_SUBMIT]		= "submit error",
		[DMATEST_ET_TIMEOUT]		= "test timed out",
		[DMATEST_ET_DMA_ERROR]		=
			"got completion callback (DMA_ERROR)",
		[DMATEST_ET_DMA_IN_PROGRESS]	=
			"got completion callback (DMA_IN_PROGRESS)",
		[DMATEST_ET_VERIFY]		= "errors",
	};
	static char buf[512];

	snprintf(buf, sizeof(buf) - 1,
		 "%s: #%u: %s with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
		 name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
		 tr->len, tr->data);

	return buf;
}

static int thread_result_add(struct dmatest_info *info,
		struct dmatest_result *r, enum dmatest_error_type type,
		unsigned int n, unsigned int src_off, unsigned int dst_off,
		unsigned int len, unsigned long data)
{
	struct dmatest_thread_result *tr;

	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return -ENOMEM;

	tr->type = type;
	tr->n = n;
	tr->src_off = src_off;
	tr->dst_off = dst_off;
	tr->len = len;
	tr->data = data;

	mutex_lock(&info->results_lock);
	list_add_tail(&tr->node, &r->results);
	mutex_unlock(&info->results_lock);

	if (tr->type == DMATEST_ET_OK)
		pr_debug("%s\n", thread_result_get(r->name, tr));
	else
		pr_warn("%s\n", thread_result_get(r->name, tr));

	return 0;
}

static void result_free(struct dmatest_info *info, const char *name)
{
	struct dmatest_result *r, *_r;

	mutex_lock(&info->results_lock);
	list_for_each_entry_safe(r, _r, &info->results, node) {
		struct dmatest_thread_result *tr, *_tr;

		if (name && strcmp(r->name, name))
			continue;

		list_for_each_entry_safe(tr, _tr, &r->results, node) {
			list_del(&tr->node);
			kfree(tr);
		}

		kfree(r->name);
		list_del(&r->node);
		kfree(r);
	}

	mutex_unlock(&info->results_lock);
}

static struct dmatest_result *result_init(struct dmatest_info *info,
		const char *name)
{
	struct dmatest_result *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (r) {
		r->name = kstrdup(name, GFP_KERNEL);
		INIT_LIST_HEAD(&r->results);
		mutex_lock(&info->results_lock);
		list_add_tail(&r->node, &info->results);
		mutex_unlock(&info->results_lock);
	}
	return r;
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread	*thread = data;
	struct dmatest_done	done = { .wait = &done_wait };
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	const char		*thread_name;
	unsigned int		src_off, dst_off, len;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;
	struct dmatest_result	*result;

	thread_name = current->comm;
	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;

		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	result = result_init(info, thread_name);
	if (!result)
		goto err_srcs;

	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

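		/*
		 * Pick a random transfer length and random, independent
		 * source/destination offsets, then round all three down
		 * to the 1 << align boundary the device requires (e.g.
		 * with align == 3, to a multiple of 8 bytes).
		 */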
		len = dmatest_random() % params->buf_size + 1;
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
		src_off = dmatest_random() % (params->buf_size - len + 1);
		dst_off = dmatest_random() % (params->buf_size - len + 1);

		src_off = (src_off >> align) << align;
		dst_off = (dst_off >> align) << align;

		dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
		dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);

		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, i);
				thread_result_add(info, result,
						  DMATEST_ET_MAP_SRC,
						  total_tests, src_off, dst_off,
						  len, ret);
				failed_tests++;
				continue;
			}
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     params->buf_size,
						     DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, src_cnt);
				unmap_dst(dev->dev, dma_dsts, params->buf_size,
					  i);
				thread_result_add(info, result,
						  DMATEST_ET_MAP_DST,
						  total_tests, src_off, dst_off,
						  len, ret);
				failed_tests++;
				continue;
			}
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			unmap_src(dev->dev, dma_srcs, len, src_cnt);
			unmap_dst(dev->dev, dma_dsts, params->buf_size,
				  dst_cnt);
			thread_result_add(info, result, DMATEST_ET_PREP,
					  total_tests, src_off, dst_off,
					  len, 0);
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			thread_result_add(info, result, DMATEST_ET_SUBMIT,
					  total_tests, src_off, dst_off,
					  len, cookie);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait, done.done,
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed-out DMA operation with a
			 * dangling pointer to done_wait. To make this
			 * correct, we'd need to allocate a wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing. For now, just
			 * leave it dangling.
			 */
			thread_result_add(info, result, DMATEST_ET_TIMEOUT,
					  total_tests, src_off, dst_off,
					  len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			enum dmatest_error_type type = (status == DMA_ERROR) ?
				DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
			thread_result_add(info, result, type,
					  total_tests, src_off, dst_off,
					  len, status);
			failed_tests++;
			continue;
		}

		/* Unmap the buffers ourselves on the success path, too */
		unmap_src(dev->dev, dma_srcs, len, src_cnt);
		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

		error_count = 0;

		pr_debug("%s: verifying source buffer...\n", thread_name);
		error_count += dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n",
				thread->task->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false);

		if (error_count) {
			thread_result_add(info, result, DMATEST_ET_VERIFY,
					  total_tests, src_off, dst_off,
					  len, error_count);
			failed_tests++;
		} else {
			thread_result_add(info, result, DMATEST_ET_OK,
					  total_tests, src_off, dst_off,
					  len, 0);
		}
	}

	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
			thread_name, total_tests, failed_tests, ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	thread->done = true;

	if (params->iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("dmatest: thread %s exited with status %d\n",
				thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warning("dmatest: No memory for %s-%s%u\n",
				   dma_chan_name(chan), op, i);

			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
					dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}

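/*
 * Wrap a channel in a dmatest_chan and spawn one set of test threads
 * per DMA capability (memcpy/xor/pq) the device advertises.
 */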
static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("dmatest: Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

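/*
 * Filter callback for dma_request_channel(): accept only channels that
 * match the (possibly empty) channel and device names in the test
 * parameters passed through @param.
 */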
static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static int __run_threaded_test(struct dmatest_info *info)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dmatest_params *params = &info->params;
	int err = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	for (;;) {
		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			err = dmatest_add_channel(info, chan);
			if (err) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
	return err;
}

#ifndef MODULE
static int run_threaded_test(struct dmatest_info *info)
{
	int ret;

	mutex_lock(&info->lock);
	ret = __run_threaded_test(info);
	mutex_unlock(&info->lock);
	return ret;
}
#endif

static void __stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void stop_threaded_test(struct dmatest_info *info)
{
	mutex_lock(&info->lock);
	__stop_threaded_test(info);
	mutex_unlock(&info->lock);
}

static int __restart_threaded_test(struct dmatest_info *info, bool run)
{
	struct dmatest_params *params = &info->params;

	/* Stop any running test first */
	__stop_threaded_test(info);

	if (run == false)
		return 0;

	/* Clear results from previous run */
	result_free(info, NULL);

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;

	/* Run test with new parameters */
	return __run_threaded_test(info);
}

static bool __is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

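/*
 * debugfs "run" file: reads return "Y\n" while test threads are still
 * running and "N\n" otherwise; writing a boolean starts ("1"/"Y") or
 * merely tears down ("0"/"N") a test using the current module
 * parameters.  Writes are rejected with -EBUSY while a test is still
 * in flight.
 */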
static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	char buf[3];

	mutex_lock(&info->lock);

	if (__is_threaded_test_run(info)) {
		buf[0] = 'Y';
	} else {
		__stop_threaded_test(info);
		buf[0] = 'N';
	}

	mutex_unlock(&info->lock);
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	char buf[16];
	bool bv;
	int ret = 0;

	if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
		return -EFAULT;

	if (strtobool(buf, &bv) == 0) {
		mutex_lock(&info->lock);

		if (__is_threaded_test_run(info))
			ret = -EBUSY;
		else
			ret = __restart_threaded_test(info, bv);

		mutex_unlock(&info->lock);
	}

	return ret ? ret : count;
}

static const struct file_operations dtf_run_fops = {
	.read	= dtf_read_run,
	.write	= dtf_write_run,
	.open	= simple_open,
	.llseek	= default_llseek,
};

static int dtf_results_show(struct seq_file *sf, void *data)
{
	struct dmatest_info *info = sf->private;
	struct dmatest_result *result;
	struct dmatest_thread_result *tr;

	mutex_lock(&info->results_lock);
	list_for_each_entry(result, &info->results, node) {
		list_for_each_entry(tr, &result->results, node)
			seq_printf(sf, "%s\n",
				   thread_result_get(result->name, tr));
	}

	mutex_unlock(&info->results_lock);
	return 0;
}

static int dtf_results_open(struct inode *inode, struct file *file)
{
	return single_open(file, dtf_results_show, inode->i_private);
}

static const struct file_operations dtf_results_fops = {
	.open		= dtf_results_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dmatest_register_dbgfs(struct dmatest_info *info)
{
	struct dentry *d;

	d = debugfs_create_dir("dmatest", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);
	if (!d)
		goto err_root;

	info->root = d;

	/* Run or stop threaded test */
	debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
			    &dtf_run_fops);

	/* Results of test in progress */
	debugfs_create_file("results", S_IRUGO, info->root, info,
			    &dtf_results_fops);

	return 0;

err_root:
	pr_err("dmatest: Failed to initialize debugfs\n");
	return -ENOMEM;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	int ret;

	memset(info, 0, sizeof(*info));

	mutex_init(&info->lock);
	INIT_LIST_HEAD(&info->channels);

	mutex_init(&info->results_lock);
	INIT_LIST_HEAD(&info->results);

	ret = dmatest_register_dbgfs(info);
	if (ret)
		return ret;

#ifdef MODULE
	return 0;
#else
	return run_threaded_test(info);
#endif
}
/* when compiled in, wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	debugfs_remove_recursive(info->root);
	stop_threaded_test(info);
	result_free(info, NULL);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");