blob: 0c8b5685d4b73196bcaa6127168985b1f2083579 [file] [log] [blame]
Pierre Ossman88ae6002007-08-12 14:23:50 +02001/*
2 * linux/drivers/mmc/card/mmc_test.c
3 *
Pierre Ossman0121a982008-06-28 17:51:27 +02004 * Copyright 2007-2008 Pierre Ossman
Pierre Ossman88ae6002007-08-12 14:23:50 +02005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12#include <linux/mmc/core.h>
13#include <linux/mmc/card.h>
14#include <linux/mmc/host.h>
15#include <linux/mmc/mmc.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/slab.h>
Pierre Ossman88ae6002007-08-12 14:23:50 +020017
18#include <linux/scatterlist.h>
Adrian Hunterfec4dcc2010-08-11 14:17:51 -070019#include <linux/swap.h> /* For nr_free_buffer_pages() */
Andy Shevchenko3183aa12010-09-01 09:26:47 +030020#include <linux/list.h>
Pierre Ossman88ae6002007-08-12 14:23:50 +020021
22#define RESULT_OK 0
23#define RESULT_FAIL 1
24#define RESULT_UNSUP_HOST 2
25#define RESULT_UNSUP_CARD 3
26
Pierre Ossman26610812008-07-04 18:17:13 +020027#define BUFFER_ORDER 2
28#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
Pierre Ossman88ae6002007-08-12 14:23:50 +020029
Adrian Hunterfec4dcc2010-08-11 14:17:51 -070030/*
31 * Limit the test area size to the maximum MMC HC erase group size. Note that
32 * the maximum SD allocation unit size is just 4MiB.
33 */
34#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
35
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated (allocation is
 *	2^@order contiguous pages, freed with __free_pages())
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};
45
/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations (each entry is one alloc_pages() result)
 * @cnt: number of valid entries in @arr
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};
55
/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver (in bytes)
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory backing @sg
 * @sg: scatterlist used for the performance transfers
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
79
/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list node (chained on mmc_test_general_result.tr_lst)
 * @count: amount of groups of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate (bytes per second)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
};
95
/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list node
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run (0 or one of the RESULT_* codes)
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};
111
/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer used for data verification
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests (only with CONFIG_HIGHMEM)
 * @area: information for performance tests
 * @gr: pointer to results of the current testcase; NULL when no
 *	results are being collected (checked by the save helper)
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area	area;
	struct mmc_test_general_result	*gr;
};
132
133/*******************************************************************/
Pierre Ossman6b174932008-06-30 09:09:27 +0200134/* General helper functions */
Pierre Ossman88ae6002007-08-12 14:23:50 +0200135/*******************************************************************/
136
Pierre Ossman6b174932008-06-30 09:09:27 +0200137/*
138 * Configure correct block size in card
139 */
Pierre Ossman88ae6002007-08-12 14:23:50 +0200140static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
141{
142 struct mmc_command cmd;
143 int ret;
144
145 cmd.opcode = MMC_SET_BLOCKLEN;
146 cmd.arg = size;
147 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
148 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
149 if (ret)
150 return ret;
151
152 return 0;
153}
154
/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 *
 * @sg/@sg_len describe the data buffer, @dev_addr is the card address,
 * @blocks/@blksz the transfer geometry, and @write selects direction.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	/* Multi-block transfers use the multiple-block command opcodes */
	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	/*
	 * Block-addressed (high capacity) cards take the sector number
	 * directly; byte-addressed cards need it scaled to a byte offset.
	 */
	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Single-block transfers need no STOP_TRANSMISSION command */
	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
194
/*
 * Return non-zero while the card is still busy: either the R1 status
 * reports the data lines not ready, or the card state machine is in
 * state 7 (the "prg"/programming state).
 */
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
}
200
Pierre Ossman6b174932008-06-30 09:09:27 +0200201/*
202 * Wait for the card to finish the busy state
203 */
204static int mmc_test_wait_busy(struct mmc_test_card *test)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200205{
206 int ret, busy;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200207 struct mmc_command cmd;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200208
209 busy = 0;
210 do {
Pierre Ossman88ae6002007-08-12 14:23:50 +0200211 memset(&cmd, 0, sizeof(struct mmc_command));
212
213 cmd.opcode = MMC_SEND_STATUS;
214 cmd.arg = test->card->rca << 16;
215 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
216
Pierre Ossman6b174932008-06-30 09:09:27 +0200217 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
218 if (ret)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200219 break;
220
Adrian Hunter64f71202010-08-11 14:17:51 -0700221 if (!busy && mmc_test_busy(&cmd)) {
Pierre Ossman88ae6002007-08-12 14:23:50 +0200222 busy = 1;
223 printk(KERN_INFO "%s: Warning: Host did not "
224 "wait for busy state to end.\n",
225 mmc_hostname(test->card->host));
226 }
Adrian Hunter64f71202010-08-11 14:17:51 -0700227 } while (mmc_test_busy(&cmd));
Pierre Ossman88ae6002007-08-12 14:23:50 +0200228
229 return ret;
230}
231
Pierre Ossman6b174932008-06-30 09:09:27 +0200232/*
233 * Transfer a single sector of kernel addressable data
234 */
235static int mmc_test_buffer_transfer(struct mmc_test_card *test,
236 u8 *buffer, unsigned addr, unsigned blksz, int write)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200237{
Pierre Ossman6b174932008-06-30 09:09:27 +0200238 int ret;
239
240 struct mmc_request mrq;
241 struct mmc_command cmd;
242 struct mmc_command stop;
243 struct mmc_data data;
244
245 struct scatterlist sg;
246
247 memset(&mrq, 0, sizeof(struct mmc_request));
248 memset(&cmd, 0, sizeof(struct mmc_command));
249 memset(&data, 0, sizeof(struct mmc_data));
250 memset(&stop, 0, sizeof(struct mmc_command));
251
252 mrq.cmd = &cmd;
253 mrq.data = &data;
254 mrq.stop = &stop;
255
256 sg_init_one(&sg, buffer, blksz);
257
258 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
259
260 mmc_wait_for_req(test->card->host, &mrq);
261
262 if (cmd.error)
263 return cmd.error;
264 if (data.error)
265 return data.error;
266
267 ret = mmc_test_wait_busy(test);
268 if (ret)
269 return ret;
270
271 return 0;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200272}
273
Adrian Hunter64f71202010-08-11 14:17:51 -0700274static void mmc_test_free_mem(struct mmc_test_mem *mem)
275{
276 if (!mem)
277 return;
278 while (mem->cnt--)
279 __free_pages(mem->arr[mem->cnt].page,
280 mem->arr[mem->cnt].order);
281 kfree(mem->arr);
282 kfree(mem);
283}
284
285/*
286 * Allocate a lot of memory, preferrably max_sz but at least min_sz. In case
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300287 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
288 * not exceed a maximum number of segments and try not to make segments much
289 * bigger than maximum segment size.
Adrian Hunter64f71202010-08-11 14:17:51 -0700290 */
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700291static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300292 unsigned long max_sz,
293 unsigned int max_segs,
294 unsigned int max_seg_sz)
Adrian Hunter64f71202010-08-11 14:17:51 -0700295{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700296 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
297 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300298 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700299 unsigned long page_cnt = 0;
300 unsigned long limit = nr_free_buffer_pages() >> 4;
Adrian Hunter64f71202010-08-11 14:17:51 -0700301 struct mmc_test_mem *mem;
Adrian Hunter64f71202010-08-11 14:17:51 -0700302
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700303 if (max_page_cnt > limit)
304 max_page_cnt = limit;
Adrian Hunter64f71202010-08-11 14:17:51 -0700305 if (max_page_cnt < min_page_cnt)
306 max_page_cnt = min_page_cnt;
307
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300308 if (max_seg_page_cnt > max_page_cnt)
309 max_seg_page_cnt = max_page_cnt;
310
311 if (max_segs > max_page_cnt)
312 max_segs = max_page_cnt;
313
Adrian Hunter64f71202010-08-11 14:17:51 -0700314 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
315 if (!mem)
316 return NULL;
317
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300318 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
Adrian Hunter64f71202010-08-11 14:17:51 -0700319 GFP_KERNEL);
320 if (!mem->arr)
321 goto out_free;
322
323 while (max_page_cnt) {
324 struct page *page;
325 unsigned int order;
326 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
327 __GFP_NORETRY;
328
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300329 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
Adrian Hunter64f71202010-08-11 14:17:51 -0700330 while (1) {
331 page = alloc_pages(flags, order);
332 if (page || !order)
333 break;
334 order -= 1;
335 }
336 if (!page) {
337 if (page_cnt < min_page_cnt)
338 goto out_free;
339 break;
340 }
341 mem->arr[mem->cnt].page = page;
342 mem->arr[mem->cnt].order = order;
343 mem->cnt += 1;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700344 if (max_page_cnt <= (1UL << order))
345 break;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300346 if (mem->cnt >= max_segs) {
347 if (page_cnt < min_page_cnt)
348 goto out_free;
349 break;
350 }
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700351 max_page_cnt -= 1UL << order;
352 page_cnt += 1UL << order;
Adrian Hunter64f71202010-08-11 14:17:51 -0700353 }
354
355 return mem;
356
357out_free:
358 mmc_test_free_mem(mem);
359 return NULL;
360}
361
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 *
 * @mem: allocated pages to map
 * @sz: total number of bytes to map
 * @sglist: scatterlist to fill (capacity @max_segs entries)
 * @repeat: if non-zero, cycle over @mem again until @sz is fully mapped
 * @max_seg_sz: upper bound for a single segment's length
 * @sg_len: out - number of segments actually used
 *
 * Returns 0 on success, or -EINVAL if @sz could not be mapped within the
 * available segments.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			/* Clamp segment to remaining bytes... */
			if (len > sz)
				len = sz;
			/* ...and to the driver's maximum segment size */
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			/* sg_next() returns NULL when sglist is exhausted */
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
407
/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 *
 * Walks the allocations backwards, page by page, skipping any page that is
 * physically adjacent to the previously mapped one, so the resulting
 * scatterlist exercises maximum scatter in the host driver.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		/* Iterate allocations from last to first */
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			/* Skip pages adjacent to the one just mapped */
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			/* sg_next() returns NULL when sglist is exhausted */
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		/* Wrap around and map the same memory again if needed */
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
459
/*
 * Calculate transfer rate in bytes per second.
 *
 * rate = bytes / (ts in seconds) computed as (bytes * 1e9) / (ts in ns).
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	/* Elapsed time in nanoseconds */
	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

	/*
	 * do_div() needs a 32-bit divisor; scale numerator and
	 * denominator down together until ns fits.
	 */
	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
485
486/*
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300487 * Save transfer results for future usage
488 */
489static void mmc_test_save_transfer_result(struct mmc_test_card *test,
490 unsigned int count, unsigned int sectors, struct timespec ts,
491 unsigned int rate)
492{
493 struct mmc_test_transfer_result *tr;
494
495 if (!test->gr)
496 return;
497
498 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
499 if (!tr)
500 return;
501
502 tr->count = count;
503 tr->sectors = sectors;
504 tr->ts = ts;
505 tr->rate = rate;
506
507 list_add_tail(&tr->link, &test->gr->tr_lst);
508}
509
510/*
Adrian Hunter64f71202010-08-11 14:17:51 -0700511 * Print the transfer rate.
512 */
513static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
514 struct timespec *ts1, struct timespec *ts2)
515{
516 unsigned int rate, sectors = bytes >> 9;
517 struct timespec ts;
518
519 ts = timespec_sub(*ts2, *ts1);
520
521 rate = mmc_test_rate(bytes, &ts);
522
523 printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
524 "seconds (%u kB/s, %u KiB/s)\n",
525 mmc_hostname(test->card->host), sectors, sectors >> 1,
526 (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
527 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300528
529 mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
Adrian Hunter64f71202010-08-11 14:17:51 -0700530}
531
532/*
533 * Print the average transfer rate.
534 */
535static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
536 unsigned int count, struct timespec *ts1,
537 struct timespec *ts2)
538{
539 unsigned int rate, sectors = bytes >> 9;
540 uint64_t tot = bytes * count;
541 struct timespec ts;
542
543 ts = timespec_sub(*ts2, *ts1);
544
545 rate = mmc_test_rate(tot, &ts);
546
547 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
548 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
549 mmc_hostname(test->card->host), count, sectors, count,
550 sectors >> 1, (sectors == 1 ? ".5" : ""),
551 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
552 rate / 1000, rate / 1024);
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300553
554 mmc_test_save_transfer_result(test, count, sectors, ts, rate);
Adrian Hunter64f71202010-08-11 14:17:51 -0700555}
556
557/*
558 * Return the card size in sectors.
559 */
560static unsigned int mmc_test_capacity(struct mmc_card *card)
561{
562 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
563 return card->ext_csd.sectors;
564 else
565 return card->csd.capacity << (card->csd.read_blkbits - 9);
566}
567
Pierre Ossman6b174932008-06-30 09:09:27 +0200568/*******************************************************************/
569/* Test preparation and cleanup */
570/*******************************************************************/
571
572/*
573 * Fill the first couple of sectors of the card with known data
574 * so that bad reads/writes can be detected
575 */
576static int __mmc_test_prepare(struct mmc_test_card *test, int write)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200577{
578 int ret, i;
579
580 ret = mmc_test_set_blksize(test, 512);
581 if (ret)
582 return ret;
583
584 if (write)
Pierre Ossman6b174932008-06-30 09:09:27 +0200585 memset(test->buffer, 0xDF, 512);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200586 else {
Pierre Ossman6b174932008-06-30 09:09:27 +0200587 for (i = 0;i < 512;i++)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200588 test->buffer[i] = i;
589 }
590
591 for (i = 0;i < BUFFER_SIZE / 512;i++) {
Johan Kristellc286d032010-02-10 13:56:34 -0800592 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200593 if (ret)
594 return ret;
595 }
596
597 return 0;
598}
599
/* Prepare the card for a write test (0xDF fill pattern). */
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}
604
/* Prepare the card for a read test (incrementing byte pattern). */
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}
609
Pierre Ossman6b174932008-06-30 09:09:27 +0200610static int mmc_test_cleanup(struct mmc_test_card *test)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200611{
Pierre Ossman6b174932008-06-30 09:09:27 +0200612 int ret, i;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200613
Pierre Ossman6b174932008-06-30 09:09:27 +0200614 ret = mmc_test_set_blksize(test, 512);
615 if (ret)
616 return ret;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200617
Pierre Ossman6b174932008-06-30 09:09:27 +0200618 memset(test->buffer, 0, 512);
619
620 for (i = 0;i < BUFFER_SIZE / 512;i++) {
Johan Kristellc286d032010-02-10 13:56:34 -0800621 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
Pierre Ossman6b174932008-06-30 09:09:27 +0200622 if (ret)
623 return ret;
624 }
625
626 return 0;
627}
628
629/*******************************************************************/
630/* Test execution helpers */
631/*******************************************************************/
632
/*
 * Modifies the mmc_request to perform the "short transfer" tests
 *
 * For multi-block requests the single-block opcode is substituted so the
 * host expects more data than the card will move; for single-block
 * requests a non-data command (SEND_STATUS) is substituted so no data is
 * transferred at all.
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		/* Single-block commands take no STOP_TRANSMISSION */
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}
650
651/*
652 * Checks that a normal transfer didn't have any errors
653 */
654static int mmc_test_check_result(struct mmc_test_card *test,
655 struct mmc_request *mrq)
656{
657 int ret;
658
659 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
660
661 ret = 0;
662
663 if (!ret && mrq->cmd->error)
664 ret = mrq->cmd->error;
665 if (!ret && mrq->data->error)
666 ret = mrq->data->error;
667 if (!ret && mrq->stop && mrq->stop->error)
668 ret = mrq->stop->error;
669 if (!ret && mrq->data->bytes_xfered !=
670 mrq->data->blocks * mrq->data->blksz)
671 ret = RESULT_FAIL;
672
673 if (ret == -EINVAL)
674 ret = RESULT_UNSUP_HOST;
675
676 return ret;
677}
678
679/*
680 * Checks that a "short transfer" behaved as expected
681 */
682static int mmc_test_check_broken_result(struct mmc_test_card *test,
683 struct mmc_request *mrq)
684{
685 int ret;
686
687 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
688
689 ret = 0;
690
691 if (!ret && mrq->cmd->error)
692 ret = mrq->cmd->error;
693 if (!ret && mrq->data->error == 0)
694 ret = RESULT_FAIL;
695 if (!ret && mrq->data->error != -ETIMEDOUT)
696 ret = mrq->data->error;
697 if (!ret && mrq->stop && mrq->stop->error)
698 ret = mrq->stop->error;
699 if (mrq->data->blocks > 1) {
700 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
701 ret = RESULT_FAIL;
702 } else {
703 if (!ret && mrq->data->bytes_xfered > 0)
704 ret = RESULT_FAIL;
705 }
706
707 if (ret == -EINVAL)
708 ret = RESULT_UNSUP_HOST;
709
710 return ret;
711}
712
713/*
714 * Tests a basic transfer with certain parameters
715 */
716static int mmc_test_simple_transfer(struct mmc_test_card *test,
717 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
718 unsigned blocks, unsigned blksz, int write)
719{
720 struct mmc_request mrq;
721 struct mmc_command cmd;
722 struct mmc_command stop;
723 struct mmc_data data;
724
725 memset(&mrq, 0, sizeof(struct mmc_request));
726 memset(&cmd, 0, sizeof(struct mmc_command));
727 memset(&data, 0, sizeof(struct mmc_data));
728 memset(&stop, 0, sizeof(struct mmc_command));
729
730 mrq.cmd = &cmd;
731 mrq.data = &data;
732 mrq.stop = &stop;
733
734 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
735 blocks, blksz, write);
736
737 mmc_wait_for_req(test->card->host, &mrq);
738
739 mmc_test_wait_busy(test);
740
741 return mmc_test_check_result(test, &mrq);
742}
743
744/*
745 * Tests a transfer where the card will fail completely or partly
746 */
747static int mmc_test_broken_transfer(struct mmc_test_card *test,
748 unsigned blocks, unsigned blksz, int write)
749{
750 struct mmc_request mrq;
751 struct mmc_command cmd;
752 struct mmc_command stop;
753 struct mmc_data data;
754
755 struct scatterlist sg;
756
757 memset(&mrq, 0, sizeof(struct mmc_request));
758 memset(&cmd, 0, sizeof(struct mmc_command));
759 memset(&data, 0, sizeof(struct mmc_data));
760 memset(&stop, 0, sizeof(struct mmc_command));
761
762 mrq.cmd = &cmd;
763 mrq.data = &data;
764 mrq.stop = &stop;
765
766 sg_init_one(&sg, test->buffer, blocks * blksz);
767
768 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
769 mmc_test_prepare_broken_mrq(test, &mrq, write);
770
771 mmc_wait_for_req(test->card->host, &mrq);
772
773 mmc_test_wait_busy(test);
774
775 return mmc_test_check_broken_result(test, &mrq);
776}
777
/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		/* Seed the scratch buffer with a counting byte pattern */
		for (i = 0;i < blocks * blksz;i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	/*
	 * IRQs are disabled around sg_copy_* — presumably because the
	 * copy maps pages atomically; TODO confirm against sg_miter docs.
	 */
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		/* Read the data back sector by sector and verify it */
		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		/*
		 * Round up to whole sectors; when the transfer was an
		 * exact multiple of 512 read one EXTRA sector so the
		 * 0xDF fill laid down by the prepare step can be used
		 * to detect writes past the intended end.
		 */
		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0;i < sectors;i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		/* The written range must hold the counting pattern... */
		for (i = 0;i < blocks * blksz;i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		/* ...and everything after it must still be the 0xDF fill */
		for (;i < sectors * 512;i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		/* Copy the read data out of the sg and verify the pattern */
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0;i < blocks * blksz;i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
854
Pierre Ossman88ae6002007-08-12 14:23:50 +0200855/*******************************************************************/
856/* Tests */
857/*******************************************************************/
858
/**
 * struct mmc_test_case - one entry in the test suite.
 * @name: human-readable test name
 * @prepare: called before @run to set up card contents — NOTE(review):
 *	presumably may be NULL; confirm against the test table/runner
 * @run: the test body; returns 0 or one of the RESULT_* codes
 * @cleanup: called after @run to restore card contents — same NULL
 *	caveat as @prepare
 */
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
866
867static int mmc_test_basic_write(struct mmc_test_card *test)
868{
869 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +0200870 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200871
872 ret = mmc_test_set_blksize(test, 512);
873 if (ret)
874 return ret;
875
Pierre Ossman6b174932008-06-30 09:09:27 +0200876 sg_init_one(&sg, test->buffer, 512);
877
878 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200879 if (ret)
880 return ret;
881
882 return 0;
883}
884
885static int mmc_test_basic_read(struct mmc_test_card *test)
886{
887 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +0200888 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200889
890 ret = mmc_test_set_blksize(test, 512);
891 if (ret)
892 return ret;
893
Pierre Ossman6b174932008-06-30 09:09:27 +0200894 sg_init_one(&sg, test->buffer, 512);
895
Rabin Vincent58a5dd32009-02-13 22:55:26 +0530896 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200897 if (ret)
898 return ret;
899
900 return 0;
901}
902
903static int mmc_test_verify_write(struct mmc_test_card *test)
904{
905 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +0200906 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200907
Pierre Ossman6b174932008-06-30 09:09:27 +0200908 sg_init_one(&sg, test->buffer, 512);
909
910 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200911 if (ret)
912 return ret;
913
914 return 0;
915}
916
917static int mmc_test_verify_read(struct mmc_test_card *test)
918{
919 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +0200920 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200921
Pierre Ossman6b174932008-06-30 09:09:27 +0200922 sg_init_one(&sg, test->buffer, 512);
923
924 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200925 if (ret)
926 return ret;
927
928 return 0;
929}
930
931static int mmc_test_multi_write(struct mmc_test_card *test)
932{
933 int ret;
934 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +0200935 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200936
937 if (test->card->host->max_blk_count == 1)
938 return RESULT_UNSUP_HOST;
939
940 size = PAGE_SIZE * 2;
941 size = min(size, test->card->host->max_req_size);
942 size = min(size, test->card->host->max_seg_size);
943 size = min(size, test->card->host->max_blk_count * 512);
944
945 if (size < 1024)
946 return RESULT_UNSUP_HOST;
947
Pierre Ossman6b174932008-06-30 09:09:27 +0200948 sg_init_one(&sg, test->buffer, size);
949
950 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200951 if (ret)
952 return ret;
953
954 return 0;
955}
956
957static int mmc_test_multi_read(struct mmc_test_card *test)
958{
959 int ret;
960 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +0200961 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200962
963 if (test->card->host->max_blk_count == 1)
964 return RESULT_UNSUP_HOST;
965
966 size = PAGE_SIZE * 2;
967 size = min(size, test->card->host->max_req_size);
968 size = min(size, test->card->host->max_seg_size);
969 size = min(size, test->card->host->max_blk_count * 512);
970
971 if (size < 1024)
972 return RESULT_UNSUP_HOST;
973
Pierre Ossman6b174932008-06-30 09:09:27 +0200974 sg_init_one(&sg, test->buffer, size);
975
976 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200977 if (ret)
978 return ret;
979
980 return 0;
981}
982
983static int mmc_test_pow2_write(struct mmc_test_card *test)
984{
985 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +0200986 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200987
988 if (!test->card->csd.write_partial)
989 return RESULT_UNSUP_CARD;
990
991 for (i = 1; i < 512;i <<= 1) {
Pierre Ossman6b174932008-06-30 09:09:27 +0200992 sg_init_one(&sg, test->buffer, i);
993 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200994 if (ret)
995 return ret;
996 }
997
998 return 0;
999}
1000
1001static int mmc_test_pow2_read(struct mmc_test_card *test)
1002{
1003 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001004 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001005
1006 if (!test->card->csd.read_partial)
1007 return RESULT_UNSUP_CARD;
1008
1009 for (i = 1; i < 512;i <<= 1) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001010 sg_init_one(&sg, test->buffer, i);
1011 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001012 if (ret)
1013 return ret;
1014 }
1015
1016 return 0;
1017}
1018
1019static int mmc_test_weird_write(struct mmc_test_card *test)
1020{
1021 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001022 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001023
1024 if (!test->card->csd.write_partial)
1025 return RESULT_UNSUP_CARD;
1026
1027 for (i = 3; i < 512;i += 7) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001028 sg_init_one(&sg, test->buffer, i);
1029 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001030 if (ret)
1031 return ret;
1032 }
1033
1034 return 0;
1035}
1036
1037static int mmc_test_weird_read(struct mmc_test_card *test)
1038{
1039 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001040 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001041
1042 if (!test->card->csd.read_partial)
1043 return RESULT_UNSUP_CARD;
1044
1045 for (i = 3; i < 512;i += 7) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001046 sg_init_one(&sg, test->buffer, i);
1047 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001048 if (ret)
1049 return ret;
1050 }
1051
1052 return 0;
1053}
1054
1055static int mmc_test_align_write(struct mmc_test_card *test)
1056{
1057 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001058 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001059
1060 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001061 sg_init_one(&sg, test->buffer + i, 512);
1062 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001063 if (ret)
1064 return ret;
1065 }
1066
1067 return 0;
1068}
1069
1070static int mmc_test_align_read(struct mmc_test_card *test)
1071{
1072 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001073 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001074
1075 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001076 sg_init_one(&sg, test->buffer + i, 512);
1077 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001078 if (ret)
1079 return ret;
1080 }
1081
1082 return 0;
1083}
1084
1085static int mmc_test_align_multi_write(struct mmc_test_card *test)
1086{
1087 int ret, i;
1088 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +02001089 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001090
1091 if (test->card->host->max_blk_count == 1)
1092 return RESULT_UNSUP_HOST;
1093
1094 size = PAGE_SIZE * 2;
1095 size = min(size, test->card->host->max_req_size);
1096 size = min(size, test->card->host->max_seg_size);
1097 size = min(size, test->card->host->max_blk_count * 512);
1098
1099 if (size < 1024)
1100 return RESULT_UNSUP_HOST;
1101
1102 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001103 sg_init_one(&sg, test->buffer + i, size);
1104 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001105 if (ret)
1106 return ret;
1107 }
1108
1109 return 0;
1110}
1111
1112static int mmc_test_align_multi_read(struct mmc_test_card *test)
1113{
1114 int ret, i;
1115 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +02001116 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001117
1118 if (test->card->host->max_blk_count == 1)
1119 return RESULT_UNSUP_HOST;
1120
1121 size = PAGE_SIZE * 2;
1122 size = min(size, test->card->host->max_req_size);
1123 size = min(size, test->card->host->max_seg_size);
1124 size = min(size, test->card->host->max_blk_count * 512);
1125
1126 if (size < 1024)
1127 return RESULT_UNSUP_HOST;
1128
1129 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001130 sg_init_one(&sg, test->buffer + i, size);
1131 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001132 if (ret)
1133 return ret;
1134 }
1135
1136 return 0;
1137}
1138
/*
 * Check correct xfer_size reporting when a single-block write fails
 * at the start (deliberately broken transfer).
 */
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}
1153
/*
 * Check correct xfer_size reporting when a single-block read fails
 * at the start (deliberately broken transfer).
 */
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}
1168
1169static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1170{
1171 int ret;
1172
1173 if (test->card->host->max_blk_count == 1)
1174 return RESULT_UNSUP_HOST;
1175
1176 ret = mmc_test_set_blksize(test, 512);
1177 if (ret)
1178 return ret;
1179
Pierre Ossman6b174932008-06-30 09:09:27 +02001180 ret = mmc_test_broken_transfer(test, 2, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001181 if (ret)
1182 return ret;
1183
1184 return 0;
1185}
1186
1187static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1188{
1189 int ret;
1190
1191 if (test->card->host->max_blk_count == 1)
1192 return RESULT_UNSUP_HOST;
1193
1194 ret = mmc_test_set_blksize(test, 512);
1195 if (ret)
1196 return ret;
1197
Pierre Ossman6b174932008-06-30 09:09:27 +02001198 ret = mmc_test_broken_transfer(test, 2, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001199 if (ret)
1200 return ret;
1201
1202 return 0;
1203}
1204
Pierre Ossman26610812008-07-04 18:17:13 +02001205#ifdef CONFIG_HIGHMEM
1206
1207static int mmc_test_write_high(struct mmc_test_card *test)
1208{
1209 int ret;
1210 struct scatterlist sg;
1211
1212 sg_init_table(&sg, 1);
1213 sg_set_page(&sg, test->highmem, 512, 0);
1214
1215 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1216 if (ret)
1217 return ret;
1218
1219 return 0;
1220}
1221
1222static int mmc_test_read_high(struct mmc_test_card *test)
1223{
1224 int ret;
1225 struct scatterlist sg;
1226
1227 sg_init_table(&sg, 1);
1228 sg_set_page(&sg, test->highmem, 512, 0);
1229
1230 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1231 if (ret)
1232 return ret;
1233
1234 return 0;
1235}
1236
1237static int mmc_test_multi_write_high(struct mmc_test_card *test)
1238{
1239 int ret;
1240 unsigned int size;
1241 struct scatterlist sg;
1242
1243 if (test->card->host->max_blk_count == 1)
1244 return RESULT_UNSUP_HOST;
1245
1246 size = PAGE_SIZE * 2;
1247 size = min(size, test->card->host->max_req_size);
1248 size = min(size, test->card->host->max_seg_size);
1249 size = min(size, test->card->host->max_blk_count * 512);
1250
1251 if (size < 1024)
1252 return RESULT_UNSUP_HOST;
1253
1254 sg_init_table(&sg, 1);
1255 sg_set_page(&sg, test->highmem, size, 0);
1256
1257 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1258 if (ret)
1259 return ret;
1260
1261 return 0;
1262}
1263
1264static int mmc_test_multi_read_high(struct mmc_test_card *test)
1265{
1266 int ret;
1267 unsigned int size;
1268 struct scatterlist sg;
1269
1270 if (test->card->host->max_blk_count == 1)
1271 return RESULT_UNSUP_HOST;
1272
1273 size = PAGE_SIZE * 2;
1274 size = min(size, test->card->host->max_req_size);
1275 size = min(size, test->card->host->max_seg_size);
1276 size = min(size, test->card->host->max_blk_count * 512);
1277
1278 if (size < 1024)
1279 return RESULT_UNSUP_HOST;
1280
1281 sg_init_table(&sg, 1);
1282 sg_set_page(&sg, test->highmem, size, 0);
1283
1284 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1285 if (ret)
1286 return ret;
1287
1288 return 0;
1289}
1290
Adrian Hunter64f71202010-08-11 14:17:51 -07001291#else
1292
1293static int mmc_test_no_highmem(struct mmc_test_card *test)
1294{
1295 printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
1296 mmc_hostname(test->card->host));
1297 return 0;
1298}
1299
Pierre Ossman26610812008-07-04 18:17:13 +02001300#endif /* CONFIG_HIGHMEM */
1301
Adrian Hunter64f71202010-08-11 14:17:51 -07001302/*
1303 * Map sz bytes so that it can be transferred.
1304 */
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001305static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
Adrian Hunter64f71202010-08-11 14:17:51 -07001306 int max_scatter)
1307{
1308 struct mmc_test_area *t = &test->area;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001309 int err;
Adrian Hunter64f71202010-08-11 14:17:51 -07001310
1311 t->blocks = sz >> 9;
1312
1313 if (max_scatter) {
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001314 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1315 t->max_segs, t->max_seg_sz,
Adrian Hunter64f71202010-08-11 14:17:51 -07001316 &t->sg_len);
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001317 } else {
1318 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1319 t->max_seg_sz, &t->sg_len);
Adrian Hunter64f71202010-08-11 14:17:51 -07001320 }
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001321 if (err)
1322 printk(KERN_INFO "%s: Failed to map sg list\n",
1323 mmc_hostname(test->card->host));
1324 return err;
Adrian Hunter64f71202010-08-11 14:17:51 -07001325}
1326
1327/*
1328 * Transfer bytes mapped by mmc_test_area_map().
1329 */
1330static int mmc_test_area_transfer(struct mmc_test_card *test,
1331 unsigned int dev_addr, int write)
1332{
1333 struct mmc_test_area *t = &test->area;
1334
1335 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1336 t->blocks, 512, write);
1337}
1338
1339/*
1340 * Map and transfer bytes.
1341 */
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001342static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
Adrian Hunter64f71202010-08-11 14:17:51 -07001343 unsigned int dev_addr, int write, int max_scatter,
1344 int timed)
1345{
1346 struct timespec ts1, ts2;
1347 int ret;
1348
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001349 /*
1350 * In the case of a maximally scattered transfer, the maximum transfer
1351 * size is further limited by using PAGE_SIZE segments.
1352 */
1353 if (max_scatter) {
1354 struct mmc_test_area *t = &test->area;
1355 unsigned long max_tfr;
1356
1357 if (t->max_seg_sz >= PAGE_SIZE)
1358 max_tfr = t->max_segs * PAGE_SIZE;
1359 else
1360 max_tfr = t->max_segs * t->max_seg_sz;
1361 if (sz > max_tfr)
1362 sz = max_tfr;
1363 }
1364
Adrian Hunter64f71202010-08-11 14:17:51 -07001365 ret = mmc_test_area_map(test, sz, max_scatter);
1366 if (ret)
1367 return ret;
1368
1369 if (timed)
1370 getnstimeofday(&ts1);
1371
1372 ret = mmc_test_area_transfer(test, dev_addr, write);
1373 if (ret)
1374 return ret;
1375
1376 if (timed)
1377 getnstimeofday(&ts2);
1378
1379 if (timed)
1380 mmc_test_print_rate(test, sz, &ts1, &ts2);
1381
1382 return 0;
1383}
1384
1385/*
1386 * Write the test area entirely.
1387 */
1388static int mmc_test_area_fill(struct mmc_test_card *test)
1389{
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001390 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
Adrian Hunter64f71202010-08-11 14:17:51 -07001391 1, 0, 0);
1392}
1393
1394/*
1395 * Erase the test area entirely.
1396 */
1397static int mmc_test_area_erase(struct mmc_test_card *test)
1398{
1399 struct mmc_test_area *t = &test->area;
1400
1401 if (!mmc_can_erase(test->card))
1402 return 0;
1403
1404 return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
1405 MMC_ERASE_ARG);
1406}
1407
1408/*
1409 * Cleanup struct mmc_test_area.
1410 */
1411static int mmc_test_area_cleanup(struct mmc_test_card *test)
1412{
1413 struct mmc_test_area *t = &test->area;
1414
1415 kfree(t->sg);
1416 mmc_test_free_mem(t->mem);
1417
1418 return 0;
1419}
1420
1421/*
1422 * Initialize an area for testing large transfers. The size of the area is the
1423 * preferred erase size which is a good size for optimal transfer speed. Note
1424 * that is typically 4MiB for modern cards. The test area is set to the middle
1425 * of the card because cards may have different charateristics at the front
1426 * (for FAT file system optimization). Optionally, the area is erased (if the
1427 * card supports it) which may improve write performance. Optionally, the area
1428 * is filled with data for subsequent read tests.
1429 */
1430static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1431{
1432 struct mmc_test_area *t = &test->area;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001433 unsigned long min_sz = 64 * 1024;
Adrian Hunter64f71202010-08-11 14:17:51 -07001434 int ret;
1435
1436 ret = mmc_test_set_blksize(test, 512);
1437 if (ret)
1438 return ret;
1439
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001440 if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
1441 t->max_sz = TEST_AREA_MAX_SIZE;
1442 else
1443 t->max_sz = (unsigned long)test->card->pref_erase << 9;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001444
1445 t->max_segs = test->card->host->max_segs;
1446 t->max_seg_sz = test->card->host->max_seg_size;
1447
1448 t->max_tfr = t->max_sz;
1449 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1450 t->max_tfr = test->card->host->max_blk_count << 9;
1451 if (t->max_tfr > test->card->host->max_req_size)
1452 t->max_tfr = test->card->host->max_req_size;
1453 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1454 t->max_tfr = t->max_segs * t->max_seg_sz;
1455
Adrian Hunter64f71202010-08-11 14:17:51 -07001456 /*
1457 * Try to allocate enough memory for the whole area. Less is OK
1458 * because the same memory can be mapped into the scatterlist more than
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001459 * once. Also, take into account the limits imposed on scatterlist
1460 * segments by the host driver.
Adrian Hunter64f71202010-08-11 14:17:51 -07001461 */
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001462 t->mem = mmc_test_alloc_mem(min_sz, t->max_sz, t->max_segs,
1463 t->max_seg_sz);
Adrian Hunter64f71202010-08-11 14:17:51 -07001464 if (!t->mem)
1465 return -ENOMEM;
1466
Adrian Hunter64f71202010-08-11 14:17:51 -07001467 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1468 if (!t->sg) {
1469 ret = -ENOMEM;
1470 goto out_free;
1471 }
1472
1473 t->dev_addr = mmc_test_capacity(test->card) / 2;
1474 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1475
1476 if (erase) {
1477 ret = mmc_test_area_erase(test);
1478 if (ret)
1479 goto out_free;
1480 }
1481
1482 if (fill) {
1483 ret = mmc_test_area_fill(test);
1484 if (ret)
1485 goto out_free;
1486 }
1487
1488 return 0;
1489
1490out_free:
1491 mmc_test_area_cleanup(test);
1492 return ret;
1493}
1494
/*
 * Prepare for large transfers.  Neither erase nor fill the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}
1502
/*
 * Prepare for large transfers.  Erase the test area but do not fill it.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}
1510
/*
 * Prepare for large transfers.  Erase and then fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}
1518
1519/*
1520 * Test best-case performance. Best-case performance is expected from
1521 * a single large transfer.
1522 *
1523 * An additional option (max_scatter) allows the measurement of the same
1524 * transfer but with no contiguous pages in the scatter list. This tests
1525 * the efficiency of DMA to handle scattered pages.
1526 */
1527static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1528 int max_scatter)
1529{
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001530 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
Adrian Hunter64f71202010-08-11 14:17:51 -07001531 write, max_scatter, 1);
1532}
1533
/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}
1541
/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}
1549
/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}
1557
/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}
1565
1566/*
1567 * Single read performance by transfer size.
1568 */
1569static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1570{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001571 unsigned long sz;
1572 unsigned int dev_addr;
Adrian Hunter64f71202010-08-11 14:17:51 -07001573 int ret;
1574
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001575 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
Adrian Hunter64f71202010-08-11 14:17:51 -07001576 dev_addr = test->area.dev_addr + (sz >> 9);
1577 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1578 if (ret)
1579 return ret;
1580 }
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001581 sz = test->area.max_tfr;
Adrian Hunter64f71202010-08-11 14:17:51 -07001582 dev_addr = test->area.dev_addr;
1583 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1584}
1585
1586/*
1587 * Single write performance by transfer size.
1588 */
1589static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1590{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001591 unsigned long sz;
1592 unsigned int dev_addr;
Adrian Hunter64f71202010-08-11 14:17:51 -07001593 int ret;
1594
1595 ret = mmc_test_area_erase(test);
1596 if (ret)
1597 return ret;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001598 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
Adrian Hunter64f71202010-08-11 14:17:51 -07001599 dev_addr = test->area.dev_addr + (sz >> 9);
1600 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1601 if (ret)
1602 return ret;
1603 }
1604 ret = mmc_test_area_erase(test);
1605 if (ret)
1606 return ret;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001607 sz = test->area.max_tfr;
Adrian Hunter64f71202010-08-11 14:17:51 -07001608 dev_addr = test->area.dev_addr;
1609 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1610}
1611
1612/*
1613 * Single trim performance by transfer size.
1614 */
1615static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1616{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001617 unsigned long sz;
1618 unsigned int dev_addr;
Adrian Hunter64f71202010-08-11 14:17:51 -07001619 struct timespec ts1, ts2;
1620 int ret;
1621
1622 if (!mmc_can_trim(test->card))
1623 return RESULT_UNSUP_CARD;
1624
1625 if (!mmc_can_erase(test->card))
1626 return RESULT_UNSUP_HOST;
1627
1628 for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
1629 dev_addr = test->area.dev_addr + (sz >> 9);
1630 getnstimeofday(&ts1);
1631 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1632 if (ret)
1633 return ret;
1634 getnstimeofday(&ts2);
1635 mmc_test_print_rate(test, sz, &ts1, &ts2);
1636 }
1637 dev_addr = test->area.dev_addr;
1638 getnstimeofday(&ts1);
1639 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1640 if (ret)
1641 return ret;
1642 getnstimeofday(&ts2);
1643 mmc_test_print_rate(test, sz, &ts1, &ts2);
1644 return 0;
1645}
1646
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001647static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1648{
1649 unsigned int dev_addr, i, cnt;
1650 struct timespec ts1, ts2;
1651 int ret;
1652
1653 cnt = test->area.max_sz / sz;
1654 dev_addr = test->area.dev_addr;
1655 getnstimeofday(&ts1);
1656 for (i = 0; i < cnt; i++) {
1657 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1658 if (ret)
1659 return ret;
1660 dev_addr += (sz >> 9);
1661 }
1662 getnstimeofday(&ts2);
1663 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1664 return 0;
1665}
1666
Adrian Hunter64f71202010-08-11 14:17:51 -07001667/*
1668 * Consecutive read performance by transfer size.
1669 */
1670static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1671{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001672 unsigned long sz;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001673 int ret;
1674
1675 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1676 ret = mmc_test_seq_read_perf(test, sz);
1677 if (ret)
1678 return ret;
1679 }
1680 sz = test->area.max_tfr;
1681 return mmc_test_seq_read_perf(test, sz);
1682}
1683
1684static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1685{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001686 unsigned int dev_addr, i, cnt;
Adrian Hunter64f71202010-08-11 14:17:51 -07001687 struct timespec ts1, ts2;
1688 int ret;
1689
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001690 ret = mmc_test_area_erase(test);
1691 if (ret)
1692 return ret;
1693 cnt = test->area.max_sz / sz;
1694 dev_addr = test->area.dev_addr;
1695 getnstimeofday(&ts1);
1696 for (i = 0; i < cnt; i++) {
1697 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1698 if (ret)
1699 return ret;
1700 dev_addr += (sz >> 9);
Adrian Hunter64f71202010-08-11 14:17:51 -07001701 }
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001702 getnstimeofday(&ts2);
1703 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
Adrian Hunter64f71202010-08-11 14:17:51 -07001704 return 0;
1705}
1706
1707/*
1708 * Consecutive write performance by transfer size.
1709 */
1710static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1711{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001712 unsigned long sz;
Adrian Hunter64f71202010-08-11 14:17:51 -07001713 int ret;
1714
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001715 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1716 ret = mmc_test_seq_write_perf(test, sz);
Adrian Hunter64f71202010-08-11 14:17:51 -07001717 if (ret)
1718 return ret;
Adrian Hunter64f71202010-08-11 14:17:51 -07001719 }
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001720 sz = test->area.max_tfr;
1721 return mmc_test_seq_write_perf(test, sz);
Adrian Hunter64f71202010-08-11 14:17:51 -07001722}
1723
1724/*
1725 * Consecutive trim performance by transfer size.
1726 */
1727static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1728{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001729 unsigned long sz;
1730 unsigned int dev_addr, i, cnt;
Adrian Hunter64f71202010-08-11 14:17:51 -07001731 struct timespec ts1, ts2;
1732 int ret;
1733
1734 if (!mmc_can_trim(test->card))
1735 return RESULT_UNSUP_CARD;
1736
1737 if (!mmc_can_erase(test->card))
1738 return RESULT_UNSUP_HOST;
1739
1740 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
1741 ret = mmc_test_area_erase(test);
1742 if (ret)
1743 return ret;
1744 ret = mmc_test_area_fill(test);
1745 if (ret)
1746 return ret;
1747 cnt = test->area.max_sz / sz;
1748 dev_addr = test->area.dev_addr;
1749 getnstimeofday(&ts1);
1750 for (i = 0; i < cnt; i++) {
1751 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1752 MMC_TRIM_ARG);
1753 if (ret)
1754 return ret;
1755 dev_addr += (sz >> 9);
1756 }
1757 getnstimeofday(&ts2);
1758 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1759 }
1760 return 0;
1761}
1762
Pierre Ossman88ae6002007-08-12 14:23:50 +02001763static const struct mmc_test_case mmc_test_cases[] = {
1764 {
1765 .name = "Basic write (no data verification)",
1766 .run = mmc_test_basic_write,
1767 },
1768
1769 {
1770 .name = "Basic read (no data verification)",
1771 .run = mmc_test_basic_read,
1772 },
1773
1774 {
1775 .name = "Basic write (with data verification)",
Pierre Ossman6b174932008-06-30 09:09:27 +02001776 .prepare = mmc_test_prepare_write,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001777 .run = mmc_test_verify_write,
Pierre Ossman6b174932008-06-30 09:09:27 +02001778 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001779 },
1780
1781 {
1782 .name = "Basic read (with data verification)",
Pierre Ossman6b174932008-06-30 09:09:27 +02001783 .prepare = mmc_test_prepare_read,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001784 .run = mmc_test_verify_read,
Pierre Ossman6b174932008-06-30 09:09:27 +02001785 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001786 },
1787
1788 {
1789 .name = "Multi-block write",
Pierre Ossman6b174932008-06-30 09:09:27 +02001790 .prepare = mmc_test_prepare_write,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001791 .run = mmc_test_multi_write,
Pierre Ossman6b174932008-06-30 09:09:27 +02001792 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001793 },
1794
1795 {
1796 .name = "Multi-block read",
Pierre Ossman6b174932008-06-30 09:09:27 +02001797 .prepare = mmc_test_prepare_read,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001798 .run = mmc_test_multi_read,
Pierre Ossman6b174932008-06-30 09:09:27 +02001799 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001800 },
1801
1802 {
1803 .name = "Power of two block writes",
Pierre Ossman6b174932008-06-30 09:09:27 +02001804 .prepare = mmc_test_prepare_write,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001805 .run = mmc_test_pow2_write,
Pierre Ossman6b174932008-06-30 09:09:27 +02001806 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001807 },
1808
1809 {
1810 .name = "Power of two block reads",
Pierre Ossman6b174932008-06-30 09:09:27 +02001811 .prepare = mmc_test_prepare_read,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001812 .run = mmc_test_pow2_read,
Pierre Ossman6b174932008-06-30 09:09:27 +02001813 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001814 },
1815
1816 {
1817 .name = "Weird sized block writes",
Pierre Ossman6b174932008-06-30 09:09:27 +02001818 .prepare = mmc_test_prepare_write,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001819 .run = mmc_test_weird_write,
Pierre Ossman6b174932008-06-30 09:09:27 +02001820 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001821 },
1822
1823 {
1824 .name = "Weird sized block reads",
Pierre Ossman6b174932008-06-30 09:09:27 +02001825 .prepare = mmc_test_prepare_read,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001826 .run = mmc_test_weird_read,
Pierre Ossman6b174932008-06-30 09:09:27 +02001827 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001828 },
1829
1830 {
1831 .name = "Badly aligned write",
Pierre Ossman6b174932008-06-30 09:09:27 +02001832 .prepare = mmc_test_prepare_write,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001833 .run = mmc_test_align_write,
Pierre Ossman6b174932008-06-30 09:09:27 +02001834 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001835 },
1836
1837 {
1838 .name = "Badly aligned read",
Pierre Ossman6b174932008-06-30 09:09:27 +02001839 .prepare = mmc_test_prepare_read,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001840 .run = mmc_test_align_read,
Pierre Ossman6b174932008-06-30 09:09:27 +02001841 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001842 },
1843
1844 {
1845 .name = "Badly aligned multi-block write",
Pierre Ossman6b174932008-06-30 09:09:27 +02001846 .prepare = mmc_test_prepare_write,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001847 .run = mmc_test_align_multi_write,
Pierre Ossman6b174932008-06-30 09:09:27 +02001848 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001849 },
1850
1851 {
1852 .name = "Badly aligned multi-block read",
Pierre Ossman6b174932008-06-30 09:09:27 +02001853 .prepare = mmc_test_prepare_read,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001854 .run = mmc_test_align_multi_read,
Pierre Ossman6b174932008-06-30 09:09:27 +02001855 .cleanup = mmc_test_cleanup,
Pierre Ossman88ae6002007-08-12 14:23:50 +02001856 },
1857
1858 {
1859 .name = "Correct xfer_size at write (start failure)",
1860 .run = mmc_test_xfersize_write,
1861 },
1862
1863 {
1864 .name = "Correct xfer_size at read (start failure)",
1865 .run = mmc_test_xfersize_read,
1866 },
1867
1868 {
1869 .name = "Correct xfer_size at write (midway failure)",
1870 .run = mmc_test_multi_xfersize_write,
1871 },
1872
1873 {
1874 .name = "Correct xfer_size at read (midway failure)",
1875 .run = mmc_test_multi_xfersize_read,
1876 },
Pierre Ossman26610812008-07-04 18:17:13 +02001877
1878#ifdef CONFIG_HIGHMEM
1879
1880 {
1881 .name = "Highmem write",
1882 .prepare = mmc_test_prepare_write,
1883 .run = mmc_test_write_high,
1884 .cleanup = mmc_test_cleanup,
1885 },
1886
1887 {
1888 .name = "Highmem read",
1889 .prepare = mmc_test_prepare_read,
1890 .run = mmc_test_read_high,
1891 .cleanup = mmc_test_cleanup,
1892 },
1893
1894 {
1895 .name = "Multi-block highmem write",
1896 .prepare = mmc_test_prepare_write,
1897 .run = mmc_test_multi_write_high,
1898 .cleanup = mmc_test_cleanup,
1899 },
1900
1901 {
1902 .name = "Multi-block highmem read",
1903 .prepare = mmc_test_prepare_read,
1904 .run = mmc_test_multi_read_high,
1905 .cleanup = mmc_test_cleanup,
1906 },
1907
Adrian Hunter64f71202010-08-11 14:17:51 -07001908#else
1909
1910 {
1911 .name = "Highmem write",
1912 .run = mmc_test_no_highmem,
1913 },
1914
1915 {
1916 .name = "Highmem read",
1917 .run = mmc_test_no_highmem,
1918 },
1919
1920 {
1921 .name = "Multi-block highmem write",
1922 .run = mmc_test_no_highmem,
1923 },
1924
1925 {
1926 .name = "Multi-block highmem read",
1927 .run = mmc_test_no_highmem,
1928 },
1929
Pierre Ossman26610812008-07-04 18:17:13 +02001930#endif /* CONFIG_HIGHMEM */
1931
Adrian Hunter64f71202010-08-11 14:17:51 -07001932 {
1933 .name = "Best-case read performance",
1934 .prepare = mmc_test_area_prepare_fill,
1935 .run = mmc_test_best_read_performance,
1936 .cleanup = mmc_test_area_cleanup,
1937 },
1938
1939 {
1940 .name = "Best-case write performance",
1941 .prepare = mmc_test_area_prepare_erase,
1942 .run = mmc_test_best_write_performance,
1943 .cleanup = mmc_test_area_cleanup,
1944 },
1945
1946 {
1947 .name = "Best-case read performance into scattered pages",
1948 .prepare = mmc_test_area_prepare_fill,
1949 .run = mmc_test_best_read_perf_max_scatter,
1950 .cleanup = mmc_test_area_cleanup,
1951 },
1952
1953 {
1954 .name = "Best-case write performance from scattered pages",
1955 .prepare = mmc_test_area_prepare_erase,
1956 .run = mmc_test_best_write_perf_max_scatter,
1957 .cleanup = mmc_test_area_cleanup,
1958 },
1959
1960 {
1961 .name = "Single read performance by transfer size",
1962 .prepare = mmc_test_area_prepare_fill,
1963 .run = mmc_test_profile_read_perf,
1964 .cleanup = mmc_test_area_cleanup,
1965 },
1966
1967 {
1968 .name = "Single write performance by transfer size",
1969 .prepare = mmc_test_area_prepare,
1970 .run = mmc_test_profile_write_perf,
1971 .cleanup = mmc_test_area_cleanup,
1972 },
1973
1974 {
1975 .name = "Single trim performance by transfer size",
1976 .prepare = mmc_test_area_prepare_fill,
1977 .run = mmc_test_profile_trim_perf,
1978 .cleanup = mmc_test_area_cleanup,
1979 },
1980
1981 {
1982 .name = "Consecutive read performance by transfer size",
1983 .prepare = mmc_test_area_prepare_fill,
1984 .run = mmc_test_profile_seq_read_perf,
1985 .cleanup = mmc_test_area_cleanup,
1986 },
1987
1988 {
1989 .name = "Consecutive write performance by transfer size",
1990 .prepare = mmc_test_area_prepare,
1991 .run = mmc_test_profile_seq_write_perf,
1992 .cleanup = mmc_test_area_cleanup,
1993 },
1994
1995 {
1996 .name = "Consecutive trim performance by transfer size",
1997 .prepare = mmc_test_area_prepare,
1998 .run = mmc_test_profile_seq_trim_perf,
1999 .cleanup = mmc_test_area_cleanup,
2000 },
2001
Pierre Ossman88ae6002007-08-12 14:23:50 +02002002};
2003
/* Serializes test runs and all access to the global result list. */
static DEFINE_MUTEX(mmc_test_lock);

/* Results of past runs, for every card, chained via gr->link. */
static LIST_HEAD(mmc_test_result);
2007
Pierre Ossmanfd8c3262008-05-24 22:36:31 +02002008static void mmc_test_run(struct mmc_test_card *test, int testcase)
Pierre Ossman88ae6002007-08-12 14:23:50 +02002009{
2010 int i, ret;
2011
2012 printk(KERN_INFO "%s: Starting tests of card %s...\n",
2013 mmc_hostname(test->card->host), mmc_card_id(test->card));
2014
2015 mmc_claim_host(test->card->host);
2016
2017 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002018 struct mmc_test_general_result *gr;
2019
Pierre Ossmanfd8c3262008-05-24 22:36:31 +02002020 if (testcase && ((i + 1) != testcase))
2021 continue;
2022
Pierre Ossman88ae6002007-08-12 14:23:50 +02002023 printk(KERN_INFO "%s: Test case %d. %s...\n",
2024 mmc_hostname(test->card->host), i + 1,
2025 mmc_test_cases[i].name);
2026
2027 if (mmc_test_cases[i].prepare) {
2028 ret = mmc_test_cases[i].prepare(test);
2029 if (ret) {
2030 printk(KERN_INFO "%s: Result: Prepare "
2031 "stage failed! (%d)\n",
2032 mmc_hostname(test->card->host),
2033 ret);
2034 continue;
2035 }
2036 }
2037
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002038 gr = kzalloc(sizeof(struct mmc_test_general_result),
2039 GFP_KERNEL);
2040 if (gr) {
2041 INIT_LIST_HEAD(&gr->tr_lst);
2042
2043 /* Assign data what we know already */
2044 gr->card = test->card;
2045 gr->testcase = i;
2046
2047 /* Append container to global one */
2048 list_add_tail(&gr->link, &mmc_test_result);
2049
2050 /*
2051 * Save the pointer to created container in our private
2052 * structure.
2053 */
2054 test->gr = gr;
2055 }
2056
Pierre Ossman88ae6002007-08-12 14:23:50 +02002057 ret = mmc_test_cases[i].run(test);
2058 switch (ret) {
2059 case RESULT_OK:
2060 printk(KERN_INFO "%s: Result: OK\n",
2061 mmc_hostname(test->card->host));
2062 break;
2063 case RESULT_FAIL:
2064 printk(KERN_INFO "%s: Result: FAILED\n",
2065 mmc_hostname(test->card->host));
2066 break;
2067 case RESULT_UNSUP_HOST:
2068 printk(KERN_INFO "%s: Result: UNSUPPORTED "
2069 "(by host)\n",
2070 mmc_hostname(test->card->host));
2071 break;
2072 case RESULT_UNSUP_CARD:
2073 printk(KERN_INFO "%s: Result: UNSUPPORTED "
2074 "(by card)\n",
2075 mmc_hostname(test->card->host));
2076 break;
2077 default:
2078 printk(KERN_INFO "%s: Result: ERROR (%d)\n",
2079 mmc_hostname(test->card->host), ret);
2080 }
2081
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002082 /* Save the result */
2083 if (gr)
2084 gr->result = ret;
2085
Pierre Ossman88ae6002007-08-12 14:23:50 +02002086 if (mmc_test_cases[i].cleanup) {
2087 ret = mmc_test_cases[i].cleanup(test);
2088 if (ret) {
2089 printk(KERN_INFO "%s: Warning: Cleanup "
2090 "stage failed! (%d)\n",
2091 mmc_hostname(test->card->host),
2092 ret);
2093 }
2094 }
2095 }
2096
2097 mmc_release_host(test->card->host);
2098
2099 printk(KERN_INFO "%s: Tests completed.\n",
2100 mmc_hostname(test->card->host));
2101}
2102
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002103static void mmc_test_free_result(struct mmc_card *card)
2104{
2105 struct mmc_test_general_result *gr, *grs;
2106
2107 mutex_lock(&mmc_test_lock);
2108
2109 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2110 struct mmc_test_transfer_result *tr, *trs;
2111
2112 if (card && gr->card != card)
2113 continue;
2114
2115 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2116 list_del(&tr->link);
2117 kfree(tr);
2118 }
2119
2120 list_del(&gr->link);
2121 kfree(gr);
2122 }
2123
2124 mutex_unlock(&mmc_test_lock);
2125}
2126
Pierre Ossman88ae6002007-08-12 14:23:50 +02002127static ssize_t mmc_test_show(struct device *dev,
2128 struct device_attribute *attr, char *buf)
2129{
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002130 struct mmc_card *card = mmc_dev_to_card(dev);
2131 struct mmc_test_general_result *gr;
2132 char *p = buf;
2133 size_t len = PAGE_SIZE;
2134 int ret;
2135
Pierre Ossman88ae6002007-08-12 14:23:50 +02002136 mutex_lock(&mmc_test_lock);
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002137
2138 list_for_each_entry(gr, &mmc_test_result, link) {
2139 struct mmc_test_transfer_result *tr;
2140
2141 if (gr->card != card)
2142 continue;
2143
2144 ret = snprintf(p, len, "Test %d: %d\n", gr->testcase + 1,
2145 gr->result);
2146 if (ret < 0)
2147 goto err;
2148 if (ret >= len) {
2149 ret = -ENOBUFS;
2150 goto err;
2151 }
2152 p += ret;
2153 len -= ret;
2154
2155 list_for_each_entry(tr, &gr->tr_lst, link) {
2156 ret = snprintf(p, len, "%u %d %lu.%09lu %u\n",
2157 tr->count, tr->sectors,
2158 (unsigned long)tr->ts.tv_sec,
2159 (unsigned long)tr->ts.tv_nsec,
2160 tr->rate);
2161 if (ret < 0)
2162 goto err;
2163 if (ret >= len) {
2164 ret = -ENOBUFS;
2165 goto err;
2166 }
2167 p += ret;
2168 len -= ret;
2169 }
2170 }
2171
2172 ret = PAGE_SIZE - len;
2173err:
Pierre Ossman88ae6002007-08-12 14:23:50 +02002174 mutex_unlock(&mmc_test_lock);
2175
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002176 return ret;
Pierre Ossman88ae6002007-08-12 14:23:50 +02002177}
2178
2179static ssize_t mmc_test_store(struct device *dev,
2180 struct device_attribute *attr, const char *buf, size_t count)
2181{
Andy Shevchenko265cdc92010-09-17 20:32:25 -04002182 struct mmc_card *card = mmc_dev_to_card(dev);
Pierre Ossman88ae6002007-08-12 14:23:50 +02002183 struct mmc_test_card *test;
Andy Shevchenko5c25aee2010-09-01 09:26:46 +03002184 long testcase;
Pierre Ossman88ae6002007-08-12 14:23:50 +02002185
Andy Shevchenko5c25aee2010-09-01 09:26:46 +03002186 if (strict_strtol(buf, 10, &testcase))
2187 return -EINVAL;
Pierre Ossmanfd8c3262008-05-24 22:36:31 +02002188
Pierre Ossman88ae6002007-08-12 14:23:50 +02002189 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2190 if (!test)
2191 return -ENOMEM;
2192
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002193 /*
2194 * Remove all test cases associated with given card. Thus we have only
2195 * actual data of the last run.
2196 */
2197 mmc_test_free_result(card);
2198
Pierre Ossman88ae6002007-08-12 14:23:50 +02002199 test->card = card;
2200
2201 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
Pierre Ossman26610812008-07-04 18:17:13 +02002202#ifdef CONFIG_HIGHMEM
2203 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2204#endif
2205
2206#ifdef CONFIG_HIGHMEM
2207 if (test->buffer && test->highmem) {
2208#else
Pierre Ossman88ae6002007-08-12 14:23:50 +02002209 if (test->buffer) {
Pierre Ossman26610812008-07-04 18:17:13 +02002210#endif
Pierre Ossman88ae6002007-08-12 14:23:50 +02002211 mutex_lock(&mmc_test_lock);
Pierre Ossmanfd8c3262008-05-24 22:36:31 +02002212 mmc_test_run(test, testcase);
Pierre Ossman88ae6002007-08-12 14:23:50 +02002213 mutex_unlock(&mmc_test_lock);
2214 }
2215
Pierre Ossman26610812008-07-04 18:17:13 +02002216#ifdef CONFIG_HIGHMEM
2217 __free_pages(test->highmem, BUFFER_ORDER);
2218#endif
Pierre Ossman88ae6002007-08-12 14:23:50 +02002219 kfree(test->buffer);
2220 kfree(test);
2221
2222 return count;
2223}
2224
2225static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store);
2226
2227static int mmc_test_probe(struct mmc_card *card)
2228{
2229 int ret;
2230
Andy Shevchenko63be54c2010-09-01 09:26:45 +03002231 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
Pierre Ossman0121a982008-06-28 17:51:27 +02002232 return -ENODEV;
2233
Pierre Ossman88ae6002007-08-12 14:23:50 +02002234 ret = device_create_file(&card->dev, &dev_attr_test);
2235 if (ret)
2236 return ret;
2237
Pierre Ossman60c9c7b2008-07-22 14:38:35 +02002238 dev_info(&card->dev, "Card claimed for testing.\n");
2239
Pierre Ossman88ae6002007-08-12 14:23:50 +02002240 return 0;
2241}
2242
2243static void mmc_test_remove(struct mmc_card *card)
2244{
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002245 mmc_test_free_result(card);
Pierre Ossman88ae6002007-08-12 14:23:50 +02002246 device_remove_file(&card->dev, &dev_attr_test);
2247}
2248
2249static struct mmc_driver mmc_driver = {
2250 .drv = {
2251 .name = "mmc_test",
2252 },
2253 .probe = mmc_test_probe,
2254 .remove = mmc_test_remove,
2255};
2256
2257static int __init mmc_test_init(void)
2258{
2259 return mmc_register_driver(&mmc_driver);
2260}
2261
2262static void __exit mmc_test_exit(void)
2263{
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002264 /* Clear stalled data if card is still plugged */
2265 mmc_test_free_result(NULL);
2266
Pierre Ossman88ae6002007-08-12 14:23:50 +02002267 mmc_unregister_driver(&mmc_driver);
2268}
2269
2270module_init(mmc_test_init);
2271module_exit(mmc_test_exit);
2272
2273MODULE_LICENSE("GPL");
2274MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
2275MODULE_AUTHOR("Pierre Ossman");