/*
 * linux/drivers/mmc/card/mmc_test.c
 *
 * Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size. Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of groups of sectors transferred
 * @sectors: number of sectors in each group
 * @ts: duration of the transfer
 * @rate: calculated transfer rate
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	struct mmc_command cmd;
	int ret;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = size;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
	if (ret)
		return ret;

	return 0;
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
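	/*
	 * Standard-capacity cards are byte addressed, so the sector address
	 * must be converted to a byte offset.
	 */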
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}

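/*
 * Check if the card is still signalling busy, i.e. it is not ready for data
 * or still in the programming state.
 */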
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd;

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			printk(KERN_INFO "%s: Warning: Host did not "
				"wait for busy state to end.\n",
				mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}

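/*
 * Free memory previously allocated with mmc_test_alloc_mem().
 */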
static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

/*
 * Map memory into a scatterlist. Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous. Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

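	/*
	 * Scale bytes and ns down together until ns fits in 32 bits so that
	 * do_div() can be used; the ratio, and hence the rate, is preserved.
	 */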
	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);

	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);

	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

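		/*
		 * Read back whole sectors; if the data ends exactly on a
		 * sector boundary, read one extra sector so that the 0xDF
		 * fill pattern beyond the data can be verified as well.
		 */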
		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

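/**
 * struct mmc_test_case - description of a single test case.
 * @name: human-readable test name
 * @prepare: optional function run before the test to set up the card
 * @run: the test itself
 * @cleanup: optional function run after the test to restore the card
 */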
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len);
	}
	if (err)
		printk(KERN_INFO "%s: Failed to map sg list\n",
		       mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes.
 */
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	struct timespec ts1, ts2;
	int ret;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);

	ret = mmc_test_area_transfer(test, dev_addr, write);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_rate(test, sz, &ts1, &ts2);

	return 0;
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
				1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers. The size of the area is the
 * preferred erase size, which is a good size for optimal transfer speed. Note
 * that it is typically 4MiB for modern cards. The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization). Optionally, the area is erased (if
 * the card supports it), which may improve write performance. Optionally, the
 * area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
		t->max_sz = TEST_AREA_MAX_SIZE;
	else
		t->max_sz = (unsigned long)test->card->pref_erase << 9;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;

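	/*
	 * The maximum transfer size is further limited by the host's maximum
	 * block count, maximum request size and scatterlist capabilities.
	 */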
	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer. Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once. Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}

/*
 * Prepare for large transfers. Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers. Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers. Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance. Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list. This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
				write, max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = test->area.max_tfr;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

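/*
 * Consecutive read performance for a single transfer size.
 */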
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = test->area.max_sz / sz;
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

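/*
 * Consecutive write performance for a single transfer size.
 */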
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = test->area.max_sz / sz;
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

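/*
 * Table of test cases. A test is selected by writing its 1-based index in
 * this table to the debugfs 'test' file; writing 0 runs every test.
 */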
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

};

static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);

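/*
 * Run the selected test case (1-based index into mmc_test_cases), or all of
 * them if 'testcase' is zero. Results are logged to the kernel log and saved
 * on the global result list for the debugfs interface.
 */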
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Record what we already know */
			gr->card = test->card;
			gr->testcase = i;

			/* Append the container to the global result list */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save a pointer to the created container in our
			 * private structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}

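/*
 * Free the saved results for the given card, or for all cards if 'card' is
 * NULL.
 */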
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}

static LIST_HEAD(mmc_test_file_test);

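/*
 * debugfs read: print one "Test <n>: <result>" line per saved test case for
 * this card, each followed by its recorded transfer results.
 */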
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}

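/*
 * debugfs write: parse the requested test number, allocate the test context
 * and buffers, and run the tests while holding the test lock. As a sketch of
 * typical usage (the exact path depends on where debugfs is mounted and on
 * the card name):
 *
 *	echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *
 * would run test case 1 on that card.
 */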
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	char lbuf[12];
	long testcase;

	if (count >= sizeof(lbuf))
		return -EINVAL;

	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;
	lbuf[count] = '\0';

	if (strict_strtol(lbuf, 10, &testcase))
		return -EINVAL;

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all results associated with the given card so that only
	 * the data from the most recent run is kept.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}

static const struct file_operations mmc_test_fops_test = {
	.open = mtf_test_open,
	.read = seq_read,
	.write = mtf_test_write,
	.llseek = seq_lseek,
	.release = single_release,
};

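/*
 * Remove the debugfs 'test' files, either for the given card or for all
 * cards if 'card' is NULL, and free the associated bookkeeping entries.
 */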
static void mmc_test_free_file_test(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}

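/*
 * Create the 'test' file under the card's debugfs directory and remember it
 * on mmc_test_file_test so it can be removed later.
 */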
static int mmc_test_register_file_test(struct mmc_card *card)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;
	int ret = 0;

	mutex_lock(&mmc_test_lock);

	if (card->debugfs_root)
		file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
			card->debugfs_root, card, &mmc_test_fops_test);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create file. Perhaps debugfs is disabled.\n");
		ret = -ENODEV;
		goto err;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		ret = -ENOMEM;
		goto err;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}

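/* Claim MMC and SD cards for testing and expose the debugfs interface. */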
static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_file_test(card);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_file_test(card);
}

static struct mmc_driver mmc_driver = {
	.drv = {
		.name = "mmc_test",
	},
	.probe = mmc_test_probe,
	.remove = mmc_test_remove,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear any stale results in case a card is still plugged in */
	mmc_test_free_result(NULL);
	mmc_test_free_file_test(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");