blob: b0643432d6d9938b1ffffa7dbe233a9e01b8574e [file] [log] [blame]
Pierre Ossman88ae6002007-08-12 14:23:50 +02001/*
2 * linux/drivers/mmc/card/mmc_test.c
3 *
Pierre Ossman0121a982008-06-28 17:51:27 +02004 * Copyright 2007-2008 Pierre Ossman
Pierre Ossman88ae6002007-08-12 14:23:50 +02005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12#include <linux/mmc/core.h>
13#include <linux/mmc/card.h>
14#include <linux/mmc/host.h>
15#include <linux/mmc/mmc.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/slab.h>
Ulf Hansson6685ac62014-10-06 13:51:40 +020017#include <linux/device.h>
Pierre Ossman88ae6002007-08-12 14:23:50 +020018
19#include <linux/scatterlist.h>
Adrian Hunterfec4dcc2010-08-11 14:17:51 -070020#include <linux/swap.h> /* For nr_free_buffer_pages() */
Andy Shevchenko3183aa12010-09-01 09:26:47 +030021#include <linux/list.h>
Pierre Ossman88ae6002007-08-12 14:23:50 +020022
Andy Shevchenko130067e2010-09-10 10:10:50 +030023#include <linux/debugfs.h>
24#include <linux/uaccess.h>
25#include <linux/seq_file.h>
Paul Gortmaker88b47672011-07-03 15:15:51 -040026#include <linux/module.h>
Andy Shevchenko130067e2010-09-10 10:10:50 +030027
/* Outcome codes returned by each test case. */
#define RESULT_OK		0	/* test passed */
#define RESULT_FAIL		1	/* test failed */
#define RESULT_UNSUP_HOST	2	/* operation unsupported by the host */
#define RESULT_UNSUP_CARD	3	/* operation unsupported by the card */

/* Size of the kernel-addressable scratch/transfer buffers. */
#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
41
/**
 * struct mmc_test_pages - one contiguous allocation made by alloc_pages().
 * @page: first page of the allocation
 * @order: log2 of the number of pages in the allocation
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - a set of page allocations used as test memory.
 * @arr: array describing each allocation
 * @cnt: number of valid entries in @arr
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};
61
/**
 * struct mmc_test_area - state shared by the performance tests.
 * @max_sz: size of the test area on the card (in bytes)
 * @dev_addr: card address at which the performance tests operate
 * @max_tfr: largest transfer the driver allows (in bytes)
 * @max_segs: largest number of segments the driver allows in @sg
 * @max_seg_sz: largest segment size the driver allows
 * @blocks: number of 512-byte blocks currently mapped by @sg
 * @sg_len: number of entries currently mapped in @sg
 * @mem: backing memory for the transfers
 * @sg: scatterlist describing @mem
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
85
86/**
Andy Shevchenko3183aa12010-09-01 09:26:47 +030087 * struct mmc_test_transfer_result - transfer results for performance tests.
88 * @link: double-linked list
89 * @count: amount of group of sectors to check
90 * @sectors: amount of sectors to check in one group
91 * @ts: time values of transfer
92 * @rate: calculated transfer rate
Adrian Hunterb6056d12011-02-08 13:41:02 +020093 * @iops: I/O operations per second (times 100)
Andy Shevchenko3183aa12010-09-01 09:26:47 +030094 */
95struct mmc_test_transfer_result {
96 struct list_head link;
97 unsigned int count;
98 unsigned int sectors;
99 struct timespec ts;
100 unsigned int rate;
Adrian Hunterb6056d12011-02-08 13:41:02 +0200101 unsigned int iops;
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300102};
103
104/**
105 * struct mmc_test_general_result - results for tests.
106 * @link: double-linked list
107 * @card: card under test
108 * @testcase: number of test case
109 * @result: result of test run
110 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
111 */
112struct mmc_test_general_result {
113 struct list_head link;
114 struct mmc_card *card;
115 int testcase;
116 int result;
117 struct list_head tr_lst;
118};
119
120/**
Andy Shevchenko130067e2010-09-10 10:10:50 +0300121 * struct mmc_test_dbgfs_file - debugfs related file.
122 * @link: double-linked list
123 * @card: card under test
124 * @file: file created under debugfs
125 */
126struct mmc_test_dbgfs_file {
127 struct list_head link;
128 struct mmc_card *card;
129 struct dentry *file;
130};
131
132/**
Adrian Hunter64f71202010-08-11 14:17:51 -0700133 * struct mmc_test_card - test information.
134 * @card: card under test
135 * @scratch: transfer buffer
136 * @buffer: transfer buffer
137 * @highmem: buffer for highmem tests
138 * @area: information for performance tests
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300139 * @gr: pointer to results of current testcase
Adrian Hunter64f71202010-08-11 14:17:51 -0700140 */
Pierre Ossman88ae6002007-08-12 14:23:50 +0200141struct mmc_test_card {
142 struct mmc_card *card;
143
Pierre Ossman6b174932008-06-30 09:09:27 +0200144 u8 scratch[BUFFER_SIZE];
Pierre Ossman88ae6002007-08-12 14:23:50 +0200145 u8 *buffer;
Pierre Ossman26610812008-07-04 18:17:13 +0200146#ifdef CONFIG_HIGHMEM
147 struct page *highmem;
148#endif
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300149 struct mmc_test_area area;
150 struct mmc_test_general_result *gr;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200151};
152
Per Forlin9f9c4182011-07-01 18:55:26 +0200153enum mmc_test_prep_media {
154 MMC_TEST_PREP_NONE = 0,
155 MMC_TEST_PREP_WRITE_FULL = 1 << 0,
156 MMC_TEST_PREP_ERASE = 1 << 1,
157};
158
159struct mmc_test_multiple_rw {
Per Forlinbf043332011-07-01 18:55:27 +0200160 unsigned int *sg_len;
Per Forlin9f9c4182011-07-01 18:55:26 +0200161 unsigned int *bs;
162 unsigned int len;
163 unsigned int size;
164 bool do_write;
165 bool do_nonblock_req;
166 enum mmc_test_prep_media prepare;
167};
168
169struct mmc_test_async_req {
170 struct mmc_async_req areq;
171 struct mmc_test_card *test;
172};
173
Pierre Ossman88ae6002007-08-12 14:23:50 +0200174/*******************************************************************/
Pierre Ossman6b174932008-06-30 09:09:27 +0200175/* General helper functions */
Pierre Ossman88ae6002007-08-12 14:23:50 +0200176/*******************************************************************/
177
Pierre Ossman6b174932008-06-30 09:09:27 +0200178/*
179 * Configure correct block size in card
180 */
Pierre Ossman88ae6002007-08-12 14:23:50 +0200181static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
182{
Adrian Hunter0f8d8ea2010-08-24 13:20:26 +0300183 return mmc_set_blocklen(test->card, size);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200184}
185
Pierre Ossman6b174932008-06-30 09:09:27 +0200186/*
187 * Fill in the mmc_request structure given a set of transfer parameters.
188 */
189static void mmc_test_prepare_mrq(struct mmc_test_card *test,
190 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
191 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
192{
193 BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
194
195 if (blocks > 1) {
196 mrq->cmd->opcode = write ?
197 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
198 } else {
199 mrq->cmd->opcode = write ?
200 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
201 }
202
203 mrq->cmd->arg = dev_addr;
Johan Kristellc286d032010-02-10 13:56:34 -0800204 if (!mmc_card_blockaddr(test->card))
205 mrq->cmd->arg <<= 9;
206
Pierre Ossman6b174932008-06-30 09:09:27 +0200207 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
208
209 if (blocks == 1)
210 mrq->stop = NULL;
211 else {
212 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
213 mrq->stop->arg = 0;
214 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
215 }
216
217 mrq->data->blksz = blksz;
218 mrq->data->blocks = blocks;
219 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
220 mrq->data->sg = sg;
221 mrq->data->sg_len = sg_len;
222
223 mmc_set_data_timeout(mrq->data, test->card);
224}
225
Adrian Hunter64f71202010-08-11 14:17:51 -0700226static int mmc_test_busy(struct mmc_command *cmd)
227{
228 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
Jaehoon Chung7435bb72011-08-10 18:46:28 +0900229 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
Adrian Hunter64f71202010-08-11 14:17:51 -0700230}
231
Pierre Ossman6b174932008-06-30 09:09:27 +0200232/*
233 * Wait for the card to finish the busy state
234 */
235static int mmc_test_wait_busy(struct mmc_test_card *test)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200236{
237 int ret, busy;
Chris Ball1278dba2011-04-13 23:40:30 -0400238 struct mmc_command cmd = {0};
Pierre Ossman88ae6002007-08-12 14:23:50 +0200239
240 busy = 0;
241 do {
Pierre Ossman88ae6002007-08-12 14:23:50 +0200242 memset(&cmd, 0, sizeof(struct mmc_command));
243
244 cmd.opcode = MMC_SEND_STATUS;
245 cmd.arg = test->card->rca << 16;
246 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
247
Pierre Ossman6b174932008-06-30 09:09:27 +0200248 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
249 if (ret)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200250 break;
251
Adrian Hunter64f71202010-08-11 14:17:51 -0700252 if (!busy && mmc_test_busy(&cmd)) {
Pierre Ossman88ae6002007-08-12 14:23:50 +0200253 busy = 1;
Pawel Moll54d6b442011-02-06 15:06:24 -0500254 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
Girish K Sa3c76eb2011-10-11 11:44:09 +0530255 pr_info("%s: Warning: Host did not "
Pawel Moll54d6b442011-02-06 15:06:24 -0500256 "wait for busy state to end.\n",
257 mmc_hostname(test->card->host));
Pierre Ossman88ae6002007-08-12 14:23:50 +0200258 }
Adrian Hunter64f71202010-08-11 14:17:51 -0700259 } while (mmc_test_busy(&cmd));
Pierre Ossman88ae6002007-08-12 14:23:50 +0200260
261 return ret;
262}
263
Pierre Ossman6b174932008-06-30 09:09:27 +0200264/*
265 * Transfer a single sector of kernel addressable data
266 */
267static int mmc_test_buffer_transfer(struct mmc_test_card *test,
268 u8 *buffer, unsigned addr, unsigned blksz, int write)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200269{
Pierre Ossman6b174932008-06-30 09:09:27 +0200270 int ret;
271
Chris Ball24f5b532011-04-13 23:49:45 -0400272 struct mmc_request mrq = {0};
Chris Ball1278dba2011-04-13 23:40:30 -0400273 struct mmc_command cmd = {0};
274 struct mmc_command stop = {0};
Chris Balla61ad2b2011-04-13 23:46:05 -0400275 struct mmc_data data = {0};
Pierre Ossman6b174932008-06-30 09:09:27 +0200276
277 struct scatterlist sg;
278
Pierre Ossman6b174932008-06-30 09:09:27 +0200279 mrq.cmd = &cmd;
280 mrq.data = &data;
281 mrq.stop = &stop;
282
283 sg_init_one(&sg, buffer, blksz);
284
285 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
286
287 mmc_wait_for_req(test->card->host, &mrq);
288
289 if (cmd.error)
290 return cmd.error;
291 if (data.error)
292 return data.error;
293
294 ret = mmc_test_wait_busy(test);
295 if (ret)
296 return ret;
297
298 return 0;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200299}
300
Adrian Hunter64f71202010-08-11 14:17:51 -0700301static void mmc_test_free_mem(struct mmc_test_mem *mem)
302{
303 if (!mem)
304 return;
305 while (mem->cnt--)
306 __free_pages(mem->arr[mem->cnt].page,
307 mem->arr[mem->cnt].order);
308 kfree(mem->arr);
309 kfree(mem);
310}
311
312/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300313 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300314 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
315 * not exceed a maximum number of segments and try not to make segments much
316 * bigger than maximum segment size.
Adrian Hunter64f71202010-08-11 14:17:51 -0700317 */
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700318static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300319 unsigned long max_sz,
320 unsigned int max_segs,
321 unsigned int max_seg_sz)
Adrian Hunter64f71202010-08-11 14:17:51 -0700322{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700323 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
324 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300325 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700326 unsigned long page_cnt = 0;
327 unsigned long limit = nr_free_buffer_pages() >> 4;
Adrian Hunter64f71202010-08-11 14:17:51 -0700328 struct mmc_test_mem *mem;
Adrian Hunter64f71202010-08-11 14:17:51 -0700329
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700330 if (max_page_cnt > limit)
331 max_page_cnt = limit;
Adrian Hunter3d203be2010-09-23 14:51:29 +0300332 if (min_page_cnt > max_page_cnt)
333 min_page_cnt = max_page_cnt;
Adrian Hunter64f71202010-08-11 14:17:51 -0700334
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300335 if (max_seg_page_cnt > max_page_cnt)
336 max_seg_page_cnt = max_page_cnt;
337
338 if (max_segs > max_page_cnt)
339 max_segs = max_page_cnt;
340
Adrian Hunter64f71202010-08-11 14:17:51 -0700341 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
342 if (!mem)
343 return NULL;
344
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300345 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
Adrian Hunter64f71202010-08-11 14:17:51 -0700346 GFP_KERNEL);
347 if (!mem->arr)
348 goto out_free;
349
350 while (max_page_cnt) {
351 struct page *page;
352 unsigned int order;
353 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
354 __GFP_NORETRY;
355
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300356 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
Adrian Hunter64f71202010-08-11 14:17:51 -0700357 while (1) {
358 page = alloc_pages(flags, order);
359 if (page || !order)
360 break;
361 order -= 1;
362 }
363 if (!page) {
364 if (page_cnt < min_page_cnt)
365 goto out_free;
366 break;
367 }
368 mem->arr[mem->cnt].page = page;
369 mem->arr[mem->cnt].order = order;
370 mem->cnt += 1;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700371 if (max_page_cnt <= (1UL << order))
372 break;
Adrian Hunter3d203be2010-09-23 14:51:29 +0300373 max_page_cnt -= 1UL << order;
374 page_cnt += 1UL << order;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300375 if (mem->cnt >= max_segs) {
376 if (page_cnt < min_page_cnt)
377 goto out_free;
378 break;
379 }
Adrian Hunter64f71202010-08-11 14:17:51 -0700380 }
381
382 return mem;
383
384out_free:
385 mmc_test_free_mem(mem);
386 return NULL;
387}
388
389/*
390 * Map memory into a scatterlist. Optionally allow the same memory to be
391 * mapped more than once.
392 */
Per Forlinbf043332011-07-01 18:55:27 +0200393static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
Adrian Hunter64f71202010-08-11 14:17:51 -0700394 struct scatterlist *sglist, int repeat,
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300395 unsigned int max_segs, unsigned int max_seg_sz,
Per Forlinbf043332011-07-01 18:55:27 +0200396 unsigned int *sg_len, int min_sg_len)
Adrian Hunter64f71202010-08-11 14:17:51 -0700397{
398 struct scatterlist *sg = NULL;
399 unsigned int i;
Per Forlinbf043332011-07-01 18:55:27 +0200400 unsigned long sz = size;
Adrian Hunter64f71202010-08-11 14:17:51 -0700401
402 sg_init_table(sglist, max_segs);
Per Forlinbf043332011-07-01 18:55:27 +0200403 if (min_sg_len > max_segs)
404 min_sg_len = max_segs;
Adrian Hunter64f71202010-08-11 14:17:51 -0700405
406 *sg_len = 0;
407 do {
408 for (i = 0; i < mem->cnt; i++) {
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700409 unsigned long len = PAGE_SIZE << mem->arr[i].order;
Adrian Hunter64f71202010-08-11 14:17:51 -0700410
Per Forlinbf043332011-07-01 18:55:27 +0200411 if (min_sg_len && (size / min_sg_len < len))
412 len = ALIGN(size / min_sg_len, 512);
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300413 if (len > sz)
Adrian Hunter64f71202010-08-11 14:17:51 -0700414 len = sz;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300415 if (len > max_seg_sz)
416 len = max_seg_sz;
Adrian Hunter64f71202010-08-11 14:17:51 -0700417 if (sg)
418 sg = sg_next(sg);
419 else
420 sg = sglist;
421 if (!sg)
422 return -EINVAL;
423 sg_set_page(sg, mem->arr[i].page, len, 0);
424 sz -= len;
425 *sg_len += 1;
426 if (!sz)
427 break;
428 }
429 } while (sz && repeat);
430
431 if (sz)
432 return -EINVAL;
433
434 if (sg)
435 sg_mark_end(sg);
436
437 return 0;
438}
439
440/*
441 * Map memory into a scatterlist so that no pages are contiguous. Allow the
442 * same memory to be mapped more than once.
443 */
444static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700445 unsigned long sz,
Adrian Hunter64f71202010-08-11 14:17:51 -0700446 struct scatterlist *sglist,
447 unsigned int max_segs,
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300448 unsigned int max_seg_sz,
Adrian Hunter64f71202010-08-11 14:17:51 -0700449 unsigned int *sg_len)
450{
451 struct scatterlist *sg = NULL;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700452 unsigned int i = mem->cnt, cnt;
453 unsigned long len;
Adrian Hunter64f71202010-08-11 14:17:51 -0700454 void *base, *addr, *last_addr = NULL;
455
456 sg_init_table(sglist, max_segs);
457
458 *sg_len = 0;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300459 while (sz) {
Adrian Hunter64f71202010-08-11 14:17:51 -0700460 base = page_address(mem->arr[--i].page);
461 cnt = 1 << mem->arr[i].order;
462 while (sz && cnt) {
463 addr = base + PAGE_SIZE * --cnt;
464 if (last_addr && last_addr + PAGE_SIZE == addr)
465 continue;
466 last_addr = addr;
467 len = PAGE_SIZE;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300468 if (len > max_seg_sz)
469 len = max_seg_sz;
470 if (len > sz)
Adrian Hunter64f71202010-08-11 14:17:51 -0700471 len = sz;
472 if (sg)
473 sg = sg_next(sg);
474 else
475 sg = sglist;
476 if (!sg)
477 return -EINVAL;
478 sg_set_page(sg, virt_to_page(addr), len, 0);
479 sz -= len;
480 *sg_len += 1;
481 }
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300482 if (i == 0)
483 i = mem->cnt;
Adrian Hunter64f71202010-08-11 14:17:51 -0700484 }
485
486 if (sg)
487 sg_mark_end(sg);
488
489 return 0;
490}
491
/*
 * Compute a transfer rate in bytes per second from a byte count and an
 * elapsed time, scaling both down together until the divisor fits 32 bits.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = (uint64_t)ts->tv_sec * 1000000000 + ts->tv_nsec;

	/* bytes/ns * 1e9 == bytes/s; pre-scale the numerator. */
	bytes *= 1000000000;

	/* do_div() needs a 32-bit divisor; halve both until ns fits. */
	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
517
518/*
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300519 * Save transfer results for future usage
520 */
521static void mmc_test_save_transfer_result(struct mmc_test_card *test,
522 unsigned int count, unsigned int sectors, struct timespec ts,
Adrian Hunterb6056d12011-02-08 13:41:02 +0200523 unsigned int rate, unsigned int iops)
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300524{
525 struct mmc_test_transfer_result *tr;
526
527 if (!test->gr)
528 return;
529
530 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
531 if (!tr)
532 return;
533
534 tr->count = count;
535 tr->sectors = sectors;
536 tr->ts = ts;
537 tr->rate = rate;
Adrian Hunterb6056d12011-02-08 13:41:02 +0200538 tr->iops = iops;
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300539
540 list_add_tail(&tr->link, &test->gr->tr_lst);
541}
542
543/*
Adrian Hunter64f71202010-08-11 14:17:51 -0700544 * Print the transfer rate.
545 */
546static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
547 struct timespec *ts1, struct timespec *ts2)
548{
Adrian Hunterb6056d12011-02-08 13:41:02 +0200549 unsigned int rate, iops, sectors = bytes >> 9;
Adrian Hunter64f71202010-08-11 14:17:51 -0700550 struct timespec ts;
551
552 ts = timespec_sub(*ts2, *ts1);
553
554 rate = mmc_test_rate(bytes, &ts);
Adrian Hunterb6056d12011-02-08 13:41:02 +0200555 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
Adrian Hunter64f71202010-08-11 14:17:51 -0700556
Girish K Sa3c76eb2011-10-11 11:44:09 +0530557 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
Adrian Hunterb6056d12011-02-08 13:41:02 +0200558 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
Adrian Hunter64f71202010-08-11 14:17:51 -0700559 mmc_hostname(test->card->host), sectors, sectors >> 1,
Adrian Hunterc27d37a2010-09-23 14:51:36 +0300560 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
Adrian Hunterb6056d12011-02-08 13:41:02 +0200561 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
562 iops / 100, iops % 100);
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300563
Adrian Hunterb6056d12011-02-08 13:41:02 +0200564 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
Adrian Hunter64f71202010-08-11 14:17:51 -0700565}
566
567/*
568 * Print the average transfer rate.
569 */
570static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
571 unsigned int count, struct timespec *ts1,
572 struct timespec *ts2)
573{
Adrian Hunterb6056d12011-02-08 13:41:02 +0200574 unsigned int rate, iops, sectors = bytes >> 9;
Adrian Hunter64f71202010-08-11 14:17:51 -0700575 uint64_t tot = bytes * count;
576 struct timespec ts;
577
578 ts = timespec_sub(*ts2, *ts1);
579
580 rate = mmc_test_rate(tot, &ts);
Adrian Hunterb6056d12011-02-08 13:41:02 +0200581 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
Adrian Hunter64f71202010-08-11 14:17:51 -0700582
Girish K Sa3c76eb2011-10-11 11:44:09 +0530583 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
Adrian Hunterb6056d12011-02-08 13:41:02 +0200584 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
Per Forlinbf043332011-07-01 18:55:27 +0200585 "%u.%02u IOPS, sg_len %d)\n",
Adrian Hunter64f71202010-08-11 14:17:51 -0700586 mmc_hostname(test->card->host), count, sectors, count,
Adrian Hunterc27d37a2010-09-23 14:51:36 +0300587 sectors >> 1, (sectors & 1 ? ".5" : ""),
Adrian Hunter64f71202010-08-11 14:17:51 -0700588 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
Per Forlinbf043332011-07-01 18:55:27 +0200589 rate / 1000, rate / 1024, iops / 100, iops % 100,
590 test->area.sg_len);
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300591
Adrian Hunterb6056d12011-02-08 13:41:02 +0200592 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
Adrian Hunter64f71202010-08-11 14:17:51 -0700593}
594
595/*
596 * Return the card size in sectors.
597 */
598static unsigned int mmc_test_capacity(struct mmc_card *card)
599{
600 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
601 return card->ext_csd.sectors;
602 else
603 return card->csd.capacity << (card->csd.read_blkbits - 9);
604}
605
Pierre Ossman6b174932008-06-30 09:09:27 +0200606/*******************************************************************/
607/* Test preparation and cleanup */
608/*******************************************************************/
609
610/*
611 * Fill the first couple of sectors of the card with known data
612 * so that bad reads/writes can be detected
613 */
614static int __mmc_test_prepare(struct mmc_test_card *test, int write)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200615{
616 int ret, i;
617
618 ret = mmc_test_set_blksize(test, 512);
619 if (ret)
620 return ret;
621
622 if (write)
Pierre Ossman6b174932008-06-30 09:09:27 +0200623 memset(test->buffer, 0xDF, 512);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200624 else {
Pierre Ossman6b174932008-06-30 09:09:27 +0200625 for (i = 0;i < 512;i++)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200626 test->buffer[i] = i;
627 }
628
629 for (i = 0;i < BUFFER_SIZE / 512;i++) {
Johan Kristellc286d032010-02-10 13:56:34 -0800630 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200631 if (ret)
632 return ret;
633 }
634
635 return 0;
636}
637
/* Prepare the card for a write test. */
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

/* Prepare the card for a read test. */
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}
647
Pierre Ossman6b174932008-06-30 09:09:27 +0200648static int mmc_test_cleanup(struct mmc_test_card *test)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200649{
Pierre Ossman6b174932008-06-30 09:09:27 +0200650 int ret, i;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200651
Pierre Ossman6b174932008-06-30 09:09:27 +0200652 ret = mmc_test_set_blksize(test, 512);
653 if (ret)
654 return ret;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200655
Pierre Ossman6b174932008-06-30 09:09:27 +0200656 memset(test->buffer, 0, 512);
657
658 for (i = 0;i < BUFFER_SIZE / 512;i++) {
Johan Kristellc286d032010-02-10 13:56:34 -0800659 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
Pierre Ossman6b174932008-06-30 09:09:27 +0200660 if (ret)
661 return ret;
662 }
663
664 return 0;
665}
666
667/*******************************************************************/
668/* Test execution helpers */
669/*******************************************************************/
670
671/*
672 * Modifies the mmc_request to perform the "short transfer" tests
673 */
674static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
675 struct mmc_request *mrq, int write)
676{
677 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
678
679 if (mrq->data->blocks > 1) {
680 mrq->cmd->opcode = write ?
681 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
682 mrq->stop = NULL;
683 } else {
684 mrq->cmd->opcode = MMC_SEND_STATUS;
685 mrq->cmd->arg = test->card->rca << 16;
686 }
687}
688
689/*
690 * Checks that a normal transfer didn't have any errors
691 */
692static int mmc_test_check_result(struct mmc_test_card *test,
Per Forlin9f9c4182011-07-01 18:55:26 +0200693 struct mmc_request *mrq)
Pierre Ossman6b174932008-06-30 09:09:27 +0200694{
695 int ret;
696
697 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
698
699 ret = 0;
700
701 if (!ret && mrq->cmd->error)
702 ret = mrq->cmd->error;
703 if (!ret && mrq->data->error)
704 ret = mrq->data->error;
705 if (!ret && mrq->stop && mrq->stop->error)
706 ret = mrq->stop->error;
707 if (!ret && mrq->data->bytes_xfered !=
708 mrq->data->blocks * mrq->data->blksz)
709 ret = RESULT_FAIL;
710
711 if (ret == -EINVAL)
712 ret = RESULT_UNSUP_HOST;
713
714 return ret;
715}
716
Per Forlin9f9c4182011-07-01 18:55:26 +0200717static int mmc_test_check_result_async(struct mmc_card *card,
718 struct mmc_async_req *areq)
719{
720 struct mmc_test_async_req *test_async =
721 container_of(areq, struct mmc_test_async_req, areq);
722
723 mmc_test_wait_busy(test_async->test);
724
725 return mmc_test_check_result(test_async->test, areq->mrq);
726}
727
Pierre Ossman6b174932008-06-30 09:09:27 +0200728/*
729 * Checks that a "short transfer" behaved as expected
730 */
731static int mmc_test_check_broken_result(struct mmc_test_card *test,
732 struct mmc_request *mrq)
733{
734 int ret;
735
736 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
737
738 ret = 0;
739
740 if (!ret && mrq->cmd->error)
741 ret = mrq->cmd->error;
742 if (!ret && mrq->data->error == 0)
743 ret = RESULT_FAIL;
744 if (!ret && mrq->data->error != -ETIMEDOUT)
745 ret = mrq->data->error;
746 if (!ret && mrq->stop && mrq->stop->error)
747 ret = mrq->stop->error;
748 if (mrq->data->blocks > 1) {
749 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
750 ret = RESULT_FAIL;
751 } else {
752 if (!ret && mrq->data->bytes_xfered > 0)
753 ret = RESULT_FAIL;
754 }
755
756 if (ret == -EINVAL)
757 ret = RESULT_UNSUP_HOST;
758
759 return ret;
760}
761
762/*
Per Forlin9f9c4182011-07-01 18:55:26 +0200763 * Tests nonblock transfer with certain parameters
764 */
765static void mmc_test_nonblock_reset(struct mmc_request *mrq,
766 struct mmc_command *cmd,
767 struct mmc_command *stop,
768 struct mmc_data *data)
769{
770 memset(mrq, 0, sizeof(struct mmc_request));
771 memset(cmd, 0, sizeof(struct mmc_command));
772 memset(data, 0, sizeof(struct mmc_data));
773 memset(stop, 0, sizeof(struct mmc_command));
774
775 mrq->cmd = cmd;
776 mrq->data = data;
777 mrq->stop = stop;
778}
779static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
780 struct scatterlist *sg, unsigned sg_len,
781 unsigned dev_addr, unsigned blocks,
782 unsigned blksz, int write, int count)
783{
784 struct mmc_request mrq1;
785 struct mmc_command cmd1;
786 struct mmc_command stop1;
787 struct mmc_data data1;
788
789 struct mmc_request mrq2;
790 struct mmc_command cmd2;
791 struct mmc_command stop2;
792 struct mmc_data data2;
793
794 struct mmc_test_async_req test_areq[2];
795 struct mmc_async_req *done_areq;
796 struct mmc_async_req *cur_areq = &test_areq[0].areq;
797 struct mmc_async_req *other_areq = &test_areq[1].areq;
798 int i;
799 int ret;
800
801 test_areq[0].test = test;
802 test_areq[1].test = test;
803
804 mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
805 mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
806
807 cur_areq->mrq = &mrq1;
808 cur_areq->err_check = mmc_test_check_result_async;
809 other_areq->mrq = &mrq2;
810 other_areq->err_check = mmc_test_check_result_async;
811
812 for (i = 0; i < count; i++) {
813 mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
814 blocks, blksz, write);
815 done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
816
817 if (ret || (!done_areq && i > 0))
818 goto err;
819
820 if (done_areq) {
821 if (done_areq->mrq == &mrq2)
822 mmc_test_nonblock_reset(&mrq2, &cmd2,
823 &stop2, &data2);
824 else
825 mmc_test_nonblock_reset(&mrq1, &cmd1,
826 &stop1, &data1);
827 }
828 done_areq = cur_areq;
829 cur_areq = other_areq;
830 other_areq = done_areq;
831 dev_addr += blocks;
832 }
833
834 done_areq = mmc_start_req(test->card->host, NULL, &ret);
835
836 return ret;
837err:
838 return ret;
839}
840
841/*
Pierre Ossman6b174932008-06-30 09:09:27 +0200842 * Tests a basic transfer with certain parameters
843 */
844static int mmc_test_simple_transfer(struct mmc_test_card *test,
845 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
846 unsigned blocks, unsigned blksz, int write)
847{
Chris Ball24f5b532011-04-13 23:49:45 -0400848 struct mmc_request mrq = {0};
Chris Ball1278dba2011-04-13 23:40:30 -0400849 struct mmc_command cmd = {0};
850 struct mmc_command stop = {0};
Chris Balla61ad2b2011-04-13 23:46:05 -0400851 struct mmc_data data = {0};
Pierre Ossman6b174932008-06-30 09:09:27 +0200852
Pierre Ossman6b174932008-06-30 09:09:27 +0200853 mrq.cmd = &cmd;
854 mrq.data = &data;
855 mrq.stop = &stop;
856
857 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
858 blocks, blksz, write);
859
860 mmc_wait_for_req(test->card->host, &mrq);
861
862 mmc_test_wait_busy(test);
863
864 return mmc_test_check_result(test, &mrq);
865}
866
867/*
868 * Tests a transfer where the card will fail completely or partly
869 */
870static int mmc_test_broken_transfer(struct mmc_test_card *test,
871 unsigned blocks, unsigned blksz, int write)
872{
Chris Ball24f5b532011-04-13 23:49:45 -0400873 struct mmc_request mrq = {0};
Chris Ball1278dba2011-04-13 23:40:30 -0400874 struct mmc_command cmd = {0};
875 struct mmc_command stop = {0};
Chris Balla61ad2b2011-04-13 23:46:05 -0400876 struct mmc_data data = {0};
Pierre Ossman6b174932008-06-30 09:09:27 +0200877
878 struct scatterlist sg;
879
Pierre Ossman6b174932008-06-30 09:09:27 +0200880 mrq.cmd = &cmd;
881 mrq.data = &data;
882 mrq.stop = &stop;
883
884 sg_init_one(&sg, test->buffer, blocks * blksz);
885
886 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
887 mmc_test_prepare_broken_mrq(test, &mrq, write);
888
889 mmc_wait_for_req(test->card->host, &mrq);
890
891 mmc_test_wait_busy(test);
892
893 return mmc_test_check_broken_result(test, &mrq);
894}
895
896/*
897 * Does a complete transfer test where data is also validated
898 *
899 * Note: mmc_test_prepare() must have been done before this call
900 */
/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 *
 * For a write: fill the scratch buffer with an incrementing byte pattern,
 * write it out, then read the sectors back one at a time through the
 * driver's own buffer and verify both the pattern and the untouched tail.
 * For a read: read into zeroed scratch memory and verify the pattern.
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		/* Incrementing pattern truncated to a byte per position. */
		for (i = 0;i < blocks * blksz;i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	/*
	 * sg_copy_from_buffer() kmaps pages; disable interrupts around it
	 * so the mapping window cannot be interrupted.
	 */
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		/* Read back in standard 512-byte sectors. */
		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		/*
		 * Number of sectors covering the written data, plus one
		 * extra sector when the data ends exactly on a sector
		 * boundary, so the tail check below has bytes to inspect.
		 */
		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0;i < sectors;i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		/* The written range must contain the incrementing pattern. */
		for (i = 0;i < blocks * blksz;i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		/*
		 * Bytes past the written data are expected to still be 0xDF
		 * — presumably the fill pattern laid down by the test
		 * preparation step (not visible here; confirm against
		 * mmc_test_prepare()).
		 */
		for (;i < sectors * 512;i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		/* Copy the read data out of the scatterlist and verify it. */
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0;i < blocks * blksz;i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
972
Pierre Ossman88ae6002007-08-12 14:23:50 +0200973/*******************************************************************/
974/* Tests */
975/*******************************************************************/
976
/**
 * struct mmc_test_case - descriptor for one entry in the test table.
 * @name:    human-readable name of the test.
 * @prepare: hook run before @run — appears optional; confirm the runner
 *           skips a NULL pointer.
 * @run:     the test body; returns 0 or one of the RESULT_* codes.
 * @cleanup: hook run after @run — same NULL caveat as @prepare.
 */
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
984
985static int mmc_test_basic_write(struct mmc_test_card *test)
986{
987 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +0200988 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200989
990 ret = mmc_test_set_blksize(test, 512);
991 if (ret)
992 return ret;
993
Pierre Ossman6b174932008-06-30 09:09:27 +0200994 sg_init_one(&sg, test->buffer, 512);
995
996 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200997 if (ret)
998 return ret;
999
1000 return 0;
1001}
1002
1003static int mmc_test_basic_read(struct mmc_test_card *test)
1004{
1005 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +02001006 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001007
1008 ret = mmc_test_set_blksize(test, 512);
1009 if (ret)
1010 return ret;
1011
Pierre Ossman6b174932008-06-30 09:09:27 +02001012 sg_init_one(&sg, test->buffer, 512);
1013
Rabin Vincent58a5dd32009-02-13 22:55:26 +05301014 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001015 if (ret)
1016 return ret;
1017
1018 return 0;
1019}
1020
1021static int mmc_test_verify_write(struct mmc_test_card *test)
1022{
1023 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +02001024 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001025
Pierre Ossman6b174932008-06-30 09:09:27 +02001026 sg_init_one(&sg, test->buffer, 512);
1027
1028 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001029 if (ret)
1030 return ret;
1031
1032 return 0;
1033}
1034
1035static int mmc_test_verify_read(struct mmc_test_card *test)
1036{
1037 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +02001038 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001039
Pierre Ossman6b174932008-06-30 09:09:27 +02001040 sg_init_one(&sg, test->buffer, 512);
1041
1042 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001043 if (ret)
1044 return ret;
1045
1046 return 0;
1047}
1048
1049static int mmc_test_multi_write(struct mmc_test_card *test)
1050{
1051 int ret;
1052 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +02001053 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001054
1055 if (test->card->host->max_blk_count == 1)
1056 return RESULT_UNSUP_HOST;
1057
1058 size = PAGE_SIZE * 2;
1059 size = min(size, test->card->host->max_req_size);
1060 size = min(size, test->card->host->max_seg_size);
1061 size = min(size, test->card->host->max_blk_count * 512);
1062
1063 if (size < 1024)
1064 return RESULT_UNSUP_HOST;
1065
Pierre Ossman6b174932008-06-30 09:09:27 +02001066 sg_init_one(&sg, test->buffer, size);
1067
1068 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001069 if (ret)
1070 return ret;
1071
1072 return 0;
1073}
1074
1075static int mmc_test_multi_read(struct mmc_test_card *test)
1076{
1077 int ret;
1078 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +02001079 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001080
1081 if (test->card->host->max_blk_count == 1)
1082 return RESULT_UNSUP_HOST;
1083
1084 size = PAGE_SIZE * 2;
1085 size = min(size, test->card->host->max_req_size);
1086 size = min(size, test->card->host->max_seg_size);
1087 size = min(size, test->card->host->max_blk_count * 512);
1088
1089 if (size < 1024)
1090 return RESULT_UNSUP_HOST;
1091
Pierre Ossman6b174932008-06-30 09:09:27 +02001092 sg_init_one(&sg, test->buffer, size);
1093
1094 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001095 if (ret)
1096 return ret;
1097
1098 return 0;
1099}
1100
1101static int mmc_test_pow2_write(struct mmc_test_card *test)
1102{
1103 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001104 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001105
1106 if (!test->card->csd.write_partial)
1107 return RESULT_UNSUP_CARD;
1108
1109 for (i = 1; i < 512;i <<= 1) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001110 sg_init_one(&sg, test->buffer, i);
1111 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001112 if (ret)
1113 return ret;
1114 }
1115
1116 return 0;
1117}
1118
1119static int mmc_test_pow2_read(struct mmc_test_card *test)
1120{
1121 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001122 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001123
1124 if (!test->card->csd.read_partial)
1125 return RESULT_UNSUP_CARD;
1126
1127 for (i = 1; i < 512;i <<= 1) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001128 sg_init_one(&sg, test->buffer, i);
1129 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001130 if (ret)
1131 return ret;
1132 }
1133
1134 return 0;
1135}
1136
1137static int mmc_test_weird_write(struct mmc_test_card *test)
1138{
1139 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001140 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001141
1142 if (!test->card->csd.write_partial)
1143 return RESULT_UNSUP_CARD;
1144
1145 for (i = 3; i < 512;i += 7) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001146 sg_init_one(&sg, test->buffer, i);
1147 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001148 if (ret)
1149 return ret;
1150 }
1151
1152 return 0;
1153}
1154
1155static int mmc_test_weird_read(struct mmc_test_card *test)
1156{
1157 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001158 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001159
1160 if (!test->card->csd.read_partial)
1161 return RESULT_UNSUP_CARD;
1162
1163 for (i = 3; i < 512;i += 7) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001164 sg_init_one(&sg, test->buffer, i);
1165 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001166 if (ret)
1167 return ret;
1168 }
1169
1170 return 0;
1171}
1172
1173static int mmc_test_align_write(struct mmc_test_card *test)
1174{
1175 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001176 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001177
1178 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001179 sg_init_one(&sg, test->buffer + i, 512);
1180 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001181 if (ret)
1182 return ret;
1183 }
1184
1185 return 0;
1186}
1187
1188static int mmc_test_align_read(struct mmc_test_card *test)
1189{
1190 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001191 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001192
1193 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001194 sg_init_one(&sg, test->buffer + i, 512);
1195 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001196 if (ret)
1197 return ret;
1198 }
1199
1200 return 0;
1201}
1202
1203static int mmc_test_align_multi_write(struct mmc_test_card *test)
1204{
1205 int ret, i;
1206 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +02001207 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001208
1209 if (test->card->host->max_blk_count == 1)
1210 return RESULT_UNSUP_HOST;
1211
1212 size = PAGE_SIZE * 2;
1213 size = min(size, test->card->host->max_req_size);
1214 size = min(size, test->card->host->max_seg_size);
1215 size = min(size, test->card->host->max_blk_count * 512);
1216
1217 if (size < 1024)
1218 return RESULT_UNSUP_HOST;
1219
1220 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001221 sg_init_one(&sg, test->buffer + i, size);
1222 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001223 if (ret)
1224 return ret;
1225 }
1226
1227 return 0;
1228}
1229
1230static int mmc_test_align_multi_read(struct mmc_test_card *test)
1231{
1232 int ret, i;
1233 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +02001234 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001235
1236 if (test->card->host->max_blk_count == 1)
1237 return RESULT_UNSUP_HOST;
1238
1239 size = PAGE_SIZE * 2;
1240 size = min(size, test->card->host->max_req_size);
1241 size = min(size, test->card->host->max_seg_size);
1242 size = min(size, test->card->host->max_blk_count * 512);
1243
1244 if (size < 1024)
1245 return RESULT_UNSUP_HOST;
1246
1247 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001248 sg_init_one(&sg, test->buffer + i, size);
1249 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001250 if (ret)
1251 return ret;
1252 }
1253
1254 return 0;
1255}
1256
/*
 * Single-block write through the deliberately-broken request path to
 * check transfer-size error handling.
 */
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}
1271
/*
 * Single-block read through the deliberately-broken request path to
 * check transfer-size error handling.
 */
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}
1286
1287static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1288{
1289 int ret;
1290
1291 if (test->card->host->max_blk_count == 1)
1292 return RESULT_UNSUP_HOST;
1293
1294 ret = mmc_test_set_blksize(test, 512);
1295 if (ret)
1296 return ret;
1297
Pierre Ossman6b174932008-06-30 09:09:27 +02001298 ret = mmc_test_broken_transfer(test, 2, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001299 if (ret)
1300 return ret;
1301
1302 return 0;
1303}
1304
1305static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1306{
1307 int ret;
1308
1309 if (test->card->host->max_blk_count == 1)
1310 return RESULT_UNSUP_HOST;
1311
1312 ret = mmc_test_set_blksize(test, 512);
1313 if (ret)
1314 return ret;
1315
Pierre Ossman6b174932008-06-30 09:09:27 +02001316 ret = mmc_test_broken_transfer(test, 2, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001317 if (ret)
1318 return ret;
1319
1320 return 0;
1321}
1322
Pierre Ossman26610812008-07-04 18:17:13 +02001323#ifdef CONFIG_HIGHMEM
1324
1325static int mmc_test_write_high(struct mmc_test_card *test)
1326{
1327 int ret;
1328 struct scatterlist sg;
1329
1330 sg_init_table(&sg, 1);
1331 sg_set_page(&sg, test->highmem, 512, 0);
1332
1333 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1334 if (ret)
1335 return ret;
1336
1337 return 0;
1338}
1339
1340static int mmc_test_read_high(struct mmc_test_card *test)
1341{
1342 int ret;
1343 struct scatterlist sg;
1344
1345 sg_init_table(&sg, 1);
1346 sg_set_page(&sg, test->highmem, 512, 0);
1347
1348 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1349 if (ret)
1350 return ret;
1351
1352 return 0;
1353}
1354
1355static int mmc_test_multi_write_high(struct mmc_test_card *test)
1356{
1357 int ret;
1358 unsigned int size;
1359 struct scatterlist sg;
1360
1361 if (test->card->host->max_blk_count == 1)
1362 return RESULT_UNSUP_HOST;
1363
1364 size = PAGE_SIZE * 2;
1365 size = min(size, test->card->host->max_req_size);
1366 size = min(size, test->card->host->max_seg_size);
1367 size = min(size, test->card->host->max_blk_count * 512);
1368
1369 if (size < 1024)
1370 return RESULT_UNSUP_HOST;
1371
1372 sg_init_table(&sg, 1);
1373 sg_set_page(&sg, test->highmem, size, 0);
1374
1375 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1376 if (ret)
1377 return ret;
1378
1379 return 0;
1380}
1381
1382static int mmc_test_multi_read_high(struct mmc_test_card *test)
1383{
1384 int ret;
1385 unsigned int size;
1386 struct scatterlist sg;
1387
1388 if (test->card->host->max_blk_count == 1)
1389 return RESULT_UNSUP_HOST;
1390
1391 size = PAGE_SIZE * 2;
1392 size = min(size, test->card->host->max_req_size);
1393 size = min(size, test->card->host->max_seg_size);
1394 size = min(size, test->card->host->max_blk_count * 512);
1395
1396 if (size < 1024)
1397 return RESULT_UNSUP_HOST;
1398
1399 sg_init_table(&sg, 1);
1400 sg_set_page(&sg, test->highmem, size, 0);
1401
1402 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1403 if (ret)
1404 return ret;
1405
1406 return 0;
1407}
1408
Adrian Hunter64f71202010-08-11 14:17:51 -07001409#else
1410
/*
 * Stand-in used when CONFIG_HIGHMEM is not set: the highmem tests
 * cannot run, so just log that they were skipped and report success.
 */
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));
	return 0;
}
1417
Pierre Ossman26610812008-07-04 18:17:13 +02001418#endif /* CONFIG_HIGHMEM */
1419
Adrian Hunter64f71202010-08-11 14:17:51 -07001420/*
1421 * Map sz bytes so that it can be transferred.
1422 */
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001423static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
Per Forlinbf043332011-07-01 18:55:27 +02001424 int max_scatter, int min_sg_len)
Adrian Hunter64f71202010-08-11 14:17:51 -07001425{
1426 struct mmc_test_area *t = &test->area;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001427 int err;
Adrian Hunter64f71202010-08-11 14:17:51 -07001428
1429 t->blocks = sz >> 9;
1430
1431 if (max_scatter) {
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001432 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1433 t->max_segs, t->max_seg_sz,
Adrian Hunter64f71202010-08-11 14:17:51 -07001434 &t->sg_len);
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001435 } else {
1436 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
Per Forlinbf043332011-07-01 18:55:27 +02001437 t->max_seg_sz, &t->sg_len, min_sg_len);
Adrian Hunter64f71202010-08-11 14:17:51 -07001438 }
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001439 if (err)
Girish K Sa3c76eb2011-10-11 11:44:09 +05301440 pr_info("%s: Failed to map sg list\n",
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001441 mmc_hostname(test->card->host));
1442 return err;
Adrian Hunter64f71202010-08-11 14:17:51 -07001443}
1444
1445/*
1446 * Transfer bytes mapped by mmc_test_area_map().
1447 */
/*
 * Transfer the bytes previously mapped by mmc_test_area_map(), in
 * 512-byte blocks, at the given device address.
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
	unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}
1456
1457/*
Per Forlin9f9c4182011-07-01 18:55:26 +02001458 * Map and transfer bytes for multiple transfers.
Adrian Hunter64f71202010-08-11 14:17:51 -07001459 */
Per Forlin9f9c4182011-07-01 18:55:26 +02001460static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1461 unsigned int dev_addr, int write,
1462 int max_scatter, int timed, int count,
Per Forlinbf043332011-07-01 18:55:27 +02001463 bool nonblock, int min_sg_len)
Adrian Hunter64f71202010-08-11 14:17:51 -07001464{
1465 struct timespec ts1, ts2;
Per Forlin9f9c4182011-07-01 18:55:26 +02001466 int ret = 0;
1467 int i;
1468 struct mmc_test_area *t = &test->area;
Adrian Hunter64f71202010-08-11 14:17:51 -07001469
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001470 /*
1471 * In the case of a maximally scattered transfer, the maximum transfer
1472 * size is further limited by using PAGE_SIZE segments.
1473 */
1474 if (max_scatter) {
1475 struct mmc_test_area *t = &test->area;
1476 unsigned long max_tfr;
1477
1478 if (t->max_seg_sz >= PAGE_SIZE)
1479 max_tfr = t->max_segs * PAGE_SIZE;
1480 else
1481 max_tfr = t->max_segs * t->max_seg_sz;
1482 if (sz > max_tfr)
1483 sz = max_tfr;
1484 }
1485
Per Forlinbf043332011-07-01 18:55:27 +02001486 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
Adrian Hunter64f71202010-08-11 14:17:51 -07001487 if (ret)
1488 return ret;
1489
1490 if (timed)
1491 getnstimeofday(&ts1);
Per Forlin9f9c4182011-07-01 18:55:26 +02001492 if (nonblock)
1493 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1494 dev_addr, t->blocks, 512, write, count);
1495 else
1496 for (i = 0; i < count && ret == 0; i++) {
1497 ret = mmc_test_area_transfer(test, dev_addr, write);
1498 dev_addr += sz >> 9;
1499 }
Adrian Hunter64f71202010-08-11 14:17:51 -07001500
Adrian Hunter64f71202010-08-11 14:17:51 -07001501 if (ret)
1502 return ret;
1503
1504 if (timed)
1505 getnstimeofday(&ts2);
1506
1507 if (timed)
Per Forlin9f9c4182011-07-01 18:55:26 +02001508 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
Adrian Hunter64f71202010-08-11 14:17:51 -07001509
1510 return 0;
1511}
1512
/*
 * Single blocking transfer of the test area: convenience wrapper around
 * mmc_test_area_io_seq() with count == 1, nonblock == false and no
 * minimum scatterlist length.
 */
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}
1520
Adrian Hunter64f71202010-08-11 14:17:51 -07001521/*
1522 * Write the test area entirely.
1523 */
/*
 * Write the test area entirely, as one untimed maximum-sized write
 * starting at the area's base address.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}
1530
1531/*
1532 * Erase the test area entirely.
1533 */
/*
 * Erase the test area entirely. Returns success without doing anything
 * when the card cannot erase, so callers need no capability check.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}
1544
1545/*
1546 * Cleanup struct mmc_test_area.
1547 */
/*
 * Cleanup struct mmc_test_area: free the scatterlist array and the
 * transfer memory allocated by mmc_test_area_init().
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}
1557
1558/*
Adrian Hunter0532ff62011-02-08 13:41:01 +02001559 * Initialize an area for testing large transfers. The test area is set to the
1560 * middle of the card because cards may have different charateristics at the
1561 * front (for FAT file system optimization). Optionally, the area is erased
1562 * (if the card supports it) which may improve write performance. Optionally,
1563 * the area is filled with data for subsequent read tests.
Adrian Hunter64f71202010-08-11 14:17:51 -07001564 */
1565static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1566{
1567 struct mmc_test_area *t = &test->area;
Adrian Hunter0532ff62011-02-08 13:41:01 +02001568 unsigned long min_sz = 64 * 1024, sz;
Adrian Hunter64f71202010-08-11 14:17:51 -07001569 int ret;
1570
1571 ret = mmc_test_set_blksize(test, 512);
1572 if (ret)
1573 return ret;
1574
Adrian Hunter0532ff62011-02-08 13:41:01 +02001575 /* Make the test area size about 4MiB */
1576 sz = (unsigned long)test->card->pref_erase << 9;
1577 t->max_sz = sz;
1578 while (t->max_sz < 4 * 1024 * 1024)
1579 t->max_sz += sz;
1580 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1581 t->max_sz -= sz;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001582
1583 t->max_segs = test->card->host->max_segs;
1584 t->max_seg_sz = test->card->host->max_seg_size;
Per Forlin739c69c2011-11-14 12:04:24 +01001585 t->max_seg_sz -= t->max_seg_sz % 512;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001586
1587 t->max_tfr = t->max_sz;
1588 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1589 t->max_tfr = test->card->host->max_blk_count << 9;
1590 if (t->max_tfr > test->card->host->max_req_size)
1591 t->max_tfr = test->card->host->max_req_size;
1592 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1593 t->max_tfr = t->max_segs * t->max_seg_sz;
1594
Adrian Hunter64f71202010-08-11 14:17:51 -07001595 /*
Adrian Hunter3d203be2010-09-23 14:51:29 +03001596 * Try to allocate enough memory for a max. sized transfer. Less is OK
Adrian Hunter64f71202010-08-11 14:17:51 -07001597 * because the same memory can be mapped into the scatterlist more than
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001598 * once. Also, take into account the limits imposed on scatterlist
1599 * segments by the host driver.
Adrian Hunter64f71202010-08-11 14:17:51 -07001600 */
Adrian Hunter3d203be2010-09-23 14:51:29 +03001601 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001602 t->max_seg_sz);
Adrian Hunter64f71202010-08-11 14:17:51 -07001603 if (!t->mem)
1604 return -ENOMEM;
1605
Adrian Hunter64f71202010-08-11 14:17:51 -07001606 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1607 if (!t->sg) {
1608 ret = -ENOMEM;
1609 goto out_free;
1610 }
1611
1612 t->dev_addr = mmc_test_capacity(test->card) / 2;
1613 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1614
1615 if (erase) {
1616 ret = mmc_test_area_erase(test);
1617 if (ret)
1618 goto out_free;
1619 }
1620
1621 if (fill) {
1622 ret = mmc_test_area_fill(test);
1623 if (ret)
1624 goto out_free;
1625 }
1626
1627 return 0;
1628
1629out_free:
1630 mmc_test_area_cleanup(test);
1631 return ret;
1632}
1633
1634/*
1635 * Prepare for large transfers. Do not erase the test area.
1636 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	/* erase=0, fill=0: set up the area untouched. */
	return mmc_test_area_init(test, 0, 0);
}
1641
1642/*
1643 * Prepare for large transfers. Do erase the test area.
1644 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	/* erase=1, fill=0: set up the area and erase it. */
	return mmc_test_area_init(test, 1, 0);
}
1649
1650/*
1651 * Prepare for large transfers. Erase and fill the test area.
1652 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	/* erase=1, fill=1: set up the area, erase it, then fill it. */
	return mmc_test_area_init(test, 1, 1);
}
1657
1658/*
1659 * Test best-case performance. Best-case performance is expected from
1660 * a single large transfer.
1661 *
1662 * An additional option (max_scatter) allows the measurement of the same
1663 * transfer but with no contiguous pages in the scatter list. This tests
1664 * the efficiency of DMA to handle scattered pages.
1665 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
	int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	/* One timed (timed=1) transfer of max_tfr bytes at the area base. */
	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}
1674
1675/*
1676 * Best-case read performance.
1677 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	/* write=0, max_scatter=0: one contiguous maximum-sized read. */
	return mmc_test_best_performance(test, 0, 0);
}
1682
1683/*
1684 * Best-case write performance.
1685 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	/* write=1, max_scatter=0: one contiguous maximum-sized write. */
	return mmc_test_best_performance(test, 1, 0);
}
1690
1691/*
1692 * Best-case read performance into scattered pages.
1693 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	/* write=0, max_scatter=1: maximum-sized read, scattered pages. */
	return mmc_test_best_performance(test, 0, 1);
}
1698
1699/*
1700 * Best-case write performance from scattered pages.
1701 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	/* write=1, max_scatter=1: maximum-sized write, scattered pages. */
	return mmc_test_best_performance(test, 1, 1);
}
1706
1707/*
1708 * Single read performance by transfer size.
1709 */
1710static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1711{
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001712 struct mmc_test_area *t = &test->area;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001713 unsigned long sz;
1714 unsigned int dev_addr;
Adrian Hunter64f71202010-08-11 14:17:51 -07001715 int ret;
1716
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001717 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1718 dev_addr = t->dev_addr + (sz >> 9);
Adrian Hunter64f71202010-08-11 14:17:51 -07001719 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1720 if (ret)
1721 return ret;
1722 }
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001723 sz = t->max_tfr;
1724 dev_addr = t->dev_addr;
Adrian Hunter64f71202010-08-11 14:17:51 -07001725 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1726}
1727
1728/*
1729 * Single write performance by transfer size.
1730 */
1731static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1732{
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001733 struct mmc_test_area *t = &test->area;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001734 unsigned long sz;
1735 unsigned int dev_addr;
Adrian Hunter64f71202010-08-11 14:17:51 -07001736 int ret;
1737
1738 ret = mmc_test_area_erase(test);
1739 if (ret)
1740 return ret;
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001741 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1742 dev_addr = t->dev_addr + (sz >> 9);
Adrian Hunter64f71202010-08-11 14:17:51 -07001743 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1744 if (ret)
1745 return ret;
1746 }
1747 ret = mmc_test_area_erase(test);
1748 if (ret)
1749 return ret;
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001750 sz = t->max_tfr;
1751 dev_addr = t->dev_addr;
Adrian Hunter64f71202010-08-11 14:17:51 -07001752 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1753}
1754
1755/*
1756 * Single trim performance by transfer size.
1757 */
/*
 * Single trim performance by transfer size: timed trims of doubling
 * size, each at its own offset inside the test area, then one final
 * trim at the area base.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	/*
	 * NOTE(review): after the loop, sz is the first 512<<k value that
	 * reached t->max_sz, which can exceed t->max_sz when max_sz is not
	 * such a value — confirm the final trim cannot run past the test
	 * area (the read/write profiles reset sz to the maximum explicitly).
	 */
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
1790
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001791static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1792{
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001793 struct mmc_test_area *t = &test->area;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001794 unsigned int dev_addr, i, cnt;
1795 struct timespec ts1, ts2;
1796 int ret;
1797
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001798 cnt = t->max_sz / sz;
1799 dev_addr = t->dev_addr;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001800 getnstimeofday(&ts1);
1801 for (i = 0; i < cnt; i++) {
1802 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1803 if (ret)
1804 return ret;
1805 dev_addr += (sz >> 9);
1806 }
1807 getnstimeofday(&ts2);
1808 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1809 return 0;
1810}
1811
Adrian Hunter64f71202010-08-11 14:17:51 -07001812/*
1813 * Consecutive read performance by transfer size.
1814 */
1815static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1816{
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001817 struct mmc_test_area *t = &test->area;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001818 unsigned long sz;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001819 int ret;
1820
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001821 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001822 ret = mmc_test_seq_read_perf(test, sz);
1823 if (ret)
1824 return ret;
1825 }
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001826 sz = t->max_tfr;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001827 return mmc_test_seq_read_perf(test, sz);
1828}
1829
1830static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1831{
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001832 struct mmc_test_area *t = &test->area;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001833 unsigned int dev_addr, i, cnt;
Adrian Hunter64f71202010-08-11 14:17:51 -07001834 struct timespec ts1, ts2;
1835 int ret;
1836
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001837 ret = mmc_test_area_erase(test);
1838 if (ret)
1839 return ret;
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001840 cnt = t->max_sz / sz;
1841 dev_addr = t->dev_addr;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001842 getnstimeofday(&ts1);
1843 for (i = 0; i < cnt; i++) {
1844 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1845 if (ret)
1846 return ret;
1847 dev_addr += (sz >> 9);
Adrian Hunter64f71202010-08-11 14:17:51 -07001848 }
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001849 getnstimeofday(&ts2);
1850 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
Adrian Hunter64f71202010-08-11 14:17:51 -07001851 return 0;
1852}
1853
1854/*
1855 * Consecutive write performance by transfer size.
1856 */
1857static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1858{
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001859 struct mmc_test_area *t = &test->area;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001860 unsigned long sz;
Adrian Hunter64f71202010-08-11 14:17:51 -07001861 int ret;
1862
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001863 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001864 ret = mmc_test_seq_write_perf(test, sz);
Adrian Hunter64f71202010-08-11 14:17:51 -07001865 if (ret)
1866 return ret;
Adrian Hunter64f71202010-08-11 14:17:51 -07001867 }
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001868 sz = t->max_tfr;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001869 return mmc_test_seq_write_perf(test, sz);
Adrian Hunter64f71202010-08-11 14:17:51 -07001870}
1871
/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	/* Trim/erase are optional features; report "unsupported", not failure. */
	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	/* Note: unlike the read/write profiles, this sweep includes max_sz. */
	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		/* Erase then refill so every trim below acts on written data. */
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			/* sz >> 9 converts bytes to 512-byte sectors */
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
1911
Adrian Hunterb6056d12011-02-08 13:41:02 +02001912static unsigned int rnd_next = 1;
1913
1914static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1915{
1916 uint64_t r;
1917
1918 rnd_next = rnd_next * 1103515245 + 12345;
1919 r = (rnd_next >> 16) & 0x7fff;
1920 return (r * rnd_cnt) >> 15;
1921}
1922
/*
 * Measure random I/O performance at transfer size @sz for ~10 seconds of
 * wall-clock time.  @write selects the direction; results are printed only
 * when @print is set (callers use print=0 for warm-up runs).
 */
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;	/* transfer size in 512-byte sectors */

	/* Test window starts at 1/4 of the card and spans another 1/4. */
	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;	/* erase units in window */
	range2 = range1 / ssz;	/* transfer slots per erase unit */

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		/*
		 * Avoid hitting the same erase unit twice in a row.
		 * NOTE(review): if ea is 0 and last_ea is also 0, "ea -= 1"
		 * wraps to UINT_MAX and dev_addr leaves the intended window —
		 * confirm this is benign for the cards under test.
		 */
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
1957
1958static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1959{
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001960 struct mmc_test_area *t = &test->area;
Adrian Hunterb6056d12011-02-08 13:41:02 +02001961 unsigned int next;
1962 unsigned long sz;
1963 int ret;
1964
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001965 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
Adrian Hunterb6056d12011-02-08 13:41:02 +02001966 /*
1967 * When writing, try to get more consistent results by running
1968 * the test twice with exactly the same I/O but outputting the
1969 * results only for the 2nd run.
1970 */
1971 if (write) {
1972 next = rnd_next;
1973 ret = mmc_test_rnd_perf(test, write, 0, sz);
1974 if (ret)
1975 return ret;
1976 rnd_next = next;
1977 }
1978 ret = mmc_test_rnd_perf(test, write, 1, sz);
1979 if (ret)
1980 return ret;
1981 }
Andy Shevchenko253d6a22011-05-10 15:59:01 +03001982 sz = t->max_tfr;
Adrian Hunterb6056d12011-02-08 13:41:02 +02001983 if (write) {
1984 next = rnd_next;
1985 ret = mmc_test_rnd_perf(test, write, 0, sz);
1986 if (ret)
1987 return ret;
1988 rnd_next = next;
1989 }
1990 return mmc_test_rnd_perf(test, write, 1, sz);
1991}
1992
/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	int write = 0;

	return mmc_test_random_perf(test, write);
}
2000
/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	int write = 1;

	return mmc_test_random_perf(test, write);
}
2008
/*
 * Measure sequential transfer performance: move @tot_sz bytes in requests
 * of the maximum transfer size (further limited when @max_scatter is set),
 * reading or writing according to @write.
 */
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;	/* request size in 512-byte sectors */
	dev_addr = mmc_test_capacity(test->card) / 4;
	/* Clamp the total so the run fits between dev_addr and card end. */
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	/*
	 * Align down to a 0x10000-sector boundary.  NOTE(review): at 512-byte
	 * sectors that is a 32MiB boundary, although it has historically been
	 * described as 64MiB — confirm the intended alignment.
	 */
	dev_addr &= 0xffff0000;

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
2055
/*
 * Large sequential transfers with maximally scattered pages: 10 runs of
 * 10MiB, 5 runs of 100MiB and 3 runs of 1000MiB.
 */
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	static const struct {
		unsigned int tot_sz;
		int reps;
	} steps[] = {
		{   10 * 1024 * 1024, 10 },
		{  100 * 1024 * 1024,  5 },
		{ 1000 * 1024 * 1024,  3 },
	};
	int ret = 0;
	int s, i;

	for (s = 0; s < ARRAY_SIZE(steps); s++) {
		for (i = 0; i < steps[s].reps; i++) {
			ret = mmc_test_seq_perf(test, write,
						steps[s].tot_sz, 1);
			if (ret)
				return ret;
		}
	}

	return ret;
}
2078
/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	int write = 0;

	return mmc_test_large_seq_perf(test, write);
}
2086
/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	int write = 1;

	return mmc_test_large_seq_perf(test, write);
}
2094
Per Forlin9f9c4182011-07-01 18:55:26 +02002095static int mmc_test_rw_multiple(struct mmc_test_card *test,
2096 struct mmc_test_multiple_rw *tdata,
Per Forlinbf043332011-07-01 18:55:27 +02002097 unsigned int reqsize, unsigned int size,
2098 int min_sg_len)
Per Forlin9f9c4182011-07-01 18:55:26 +02002099{
2100 unsigned int dev_addr;
2101 struct mmc_test_area *t = &test->area;
2102 int ret = 0;
2103
2104 /* Set up test area */
2105 if (size > mmc_test_capacity(test->card) / 2 * 512)
2106 size = mmc_test_capacity(test->card) / 2 * 512;
2107 if (reqsize > t->max_tfr)
2108 reqsize = t->max_tfr;
2109 dev_addr = mmc_test_capacity(test->card) / 4;
2110 if ((dev_addr & 0xffff0000))
2111 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2112 else
2113 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2114 if (!dev_addr)
2115 goto err;
2116
2117 if (reqsize > size)
2118 return 0;
2119
2120 /* prepare test area */
2121 if (mmc_can_erase(test->card) &&
2122 tdata->prepare & MMC_TEST_PREP_ERASE) {
2123 ret = mmc_erase(test->card, dev_addr,
2124 size / 512, MMC_SECURE_ERASE_ARG);
2125 if (ret)
2126 ret = mmc_erase(test->card, dev_addr,
2127 size / 512, MMC_ERASE_ARG);
2128 if (ret)
2129 goto err;
2130 }
2131
2132 /* Run test */
2133 ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2134 tdata->do_write, 0, 1, size / reqsize,
Per Forlinbf043332011-07-01 18:55:27 +02002135 tdata->do_nonblock_req, min_sg_len);
Per Forlin9f9c4182011-07-01 18:55:26 +02002136 if (ret)
2137 goto err;
2138
2139 return ret;
2140 err:
Girish K Sa3c76eb2011-10-11 11:44:09 +05302141 pr_info("[%s] error\n", __func__);
Per Forlin9f9c4182011-07-01 18:55:26 +02002142 return ret;
2143}
2144
2145static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2146 struct mmc_test_multiple_rw *rw)
2147{
2148 int ret = 0;
2149 int i;
2150 void *pre_req = test->card->host->ops->pre_req;
2151 void *post_req = test->card->host->ops->post_req;
2152
2153 if (rw->do_nonblock_req &&
2154 ((!pre_req && post_req) || (pre_req && !post_req))) {
Girish K Sa3c76eb2011-10-11 11:44:09 +05302155 pr_info("error: only one of pre/post is defined\n");
Per Forlin9f9c4182011-07-01 18:55:26 +02002156 return -EINVAL;
2157 }
2158
2159 for (i = 0 ; i < rw->len && ret == 0; i++) {
Per Forlinbf043332011-07-01 18:55:27 +02002160 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2161 if (ret)
2162 break;
2163 }
2164 return ret;
2165}
2166
2167static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2168 struct mmc_test_multiple_rw *rw)
2169{
2170 int ret = 0;
2171 int i;
2172
2173 for (i = 0 ; i < rw->len && ret == 0; i++) {
2174 ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2175 rw->sg_len[i]);
Per Forlin9f9c4182011-07-01 18:55:26 +02002176 if (ret)
2177 break;
2178 }
2179 return ret;
2180}
2181
2182/*
2183 * Multiple blocking write 4k to 4 MB chunks
2184 */
2185static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2186{
2187 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2188 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2189 struct mmc_test_multiple_rw test_data = {
2190 .bs = bs,
2191 .size = TEST_AREA_MAX_SIZE,
2192 .len = ARRAY_SIZE(bs),
2193 .do_write = true,
2194 .do_nonblock_req = false,
2195 .prepare = MMC_TEST_PREP_ERASE,
2196 };
2197
2198 return mmc_test_rw_multiple_size(test, &test_data);
2199};
2200
2201/*
2202 * Multiple non-blocking write 4k to 4 MB chunks
2203 */
2204static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2205{
2206 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2207 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2208 struct mmc_test_multiple_rw test_data = {
2209 .bs = bs,
2210 .size = TEST_AREA_MAX_SIZE,
2211 .len = ARRAY_SIZE(bs),
2212 .do_write = true,
2213 .do_nonblock_req = true,
2214 .prepare = MMC_TEST_PREP_ERASE,
2215 };
2216
2217 return mmc_test_rw_multiple_size(test, &test_data);
2218}
2219
2220/*
2221 * Multiple blocking read 4k to 4 MB chunks
2222 */
2223static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2224{
2225 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2226 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2227 struct mmc_test_multiple_rw test_data = {
2228 .bs = bs,
2229 .size = TEST_AREA_MAX_SIZE,
2230 .len = ARRAY_SIZE(bs),
2231 .do_write = false,
2232 .do_nonblock_req = false,
2233 .prepare = MMC_TEST_PREP_NONE,
2234 };
2235
2236 return mmc_test_rw_multiple_size(test, &test_data);
2237}
2238
2239/*
2240 * Multiple non-blocking read 4k to 4 MB chunks
2241 */
2242static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2243{
2244 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2245 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2246 struct mmc_test_multiple_rw test_data = {
2247 .bs = bs,
2248 .size = TEST_AREA_MAX_SIZE,
2249 .len = ARRAY_SIZE(bs),
2250 .do_write = false,
2251 .do_nonblock_req = true,
2252 .prepare = MMC_TEST_PREP_NONE,
2253 };
2254
2255 return mmc_test_rw_multiple_size(test, &test_data);
2256}
2257
Per Forlinbf043332011-07-01 18:55:27 +02002258/*
2259 * Multiple blocking write 1 to 512 sg elements
2260 */
2261static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2262{
2263 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2264 1 << 7, 1 << 8, 1 << 9};
2265 struct mmc_test_multiple_rw test_data = {
2266 .sg_len = sg_len,
2267 .size = TEST_AREA_MAX_SIZE,
2268 .len = ARRAY_SIZE(sg_len),
2269 .do_write = true,
2270 .do_nonblock_req = false,
2271 .prepare = MMC_TEST_PREP_ERASE,
2272 };
2273
2274 return mmc_test_rw_multiple_sg_len(test, &test_data);
2275};
2276
2277/*
2278 * Multiple non-blocking write 1 to 512 sg elements
2279 */
2280static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2281{
2282 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2283 1 << 7, 1 << 8, 1 << 9};
2284 struct mmc_test_multiple_rw test_data = {
2285 .sg_len = sg_len,
2286 .size = TEST_AREA_MAX_SIZE,
2287 .len = ARRAY_SIZE(sg_len),
2288 .do_write = true,
2289 .do_nonblock_req = true,
2290 .prepare = MMC_TEST_PREP_ERASE,
2291 };
2292
2293 return mmc_test_rw_multiple_sg_len(test, &test_data);
2294}
2295
2296/*
2297 * Multiple blocking read 1 to 512 sg elements
2298 */
2299static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2300{
2301 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2302 1 << 7, 1 << 8, 1 << 9};
2303 struct mmc_test_multiple_rw test_data = {
2304 .sg_len = sg_len,
2305 .size = TEST_AREA_MAX_SIZE,
2306 .len = ARRAY_SIZE(sg_len),
2307 .do_write = false,
2308 .do_nonblock_req = false,
2309 .prepare = MMC_TEST_PREP_NONE,
2310 };
2311
2312 return mmc_test_rw_multiple_sg_len(test, &test_data);
2313}
2314
2315/*
2316 * Multiple non-blocking read 1 to 512 sg elements
2317 */
2318static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2319{
2320 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2321 1 << 7, 1 << 8, 1 << 9};
2322 struct mmc_test_multiple_rw test_data = {
2323 .sg_len = sg_len,
2324 .size = TEST_AREA_MAX_SIZE,
2325 .len = ARRAY_SIZE(sg_len),
2326 .do_write = false,
2327 .do_nonblock_req = true,
2328 .prepare = MMC_TEST_PREP_NONE,
2329 };
2330
2331 return mmc_test_rw_multiple_sg_len(test, &test_data);
2332}
2333
Adrian Hunter23113442011-08-29 16:42:14 +03002334/*
2335 * eMMC hardware reset.
2336 */
2337static int mmc_test_hw_reset(struct mmc_test_card *test)
2338{
2339 struct mmc_card *card = test->card;
2340 struct mmc_host *host = card->host;
2341 int err;
2342
2343 err = mmc_hw_reset_check(host);
2344 if (!err)
2345 return RESULT_OK;
2346
2347 if (err == -ENOSYS)
2348 return RESULT_FAIL;
2349
2350 if (err != -EOPNOTSUPP)
2351 return err;
2352
2353 if (!mmc_can_reset(card))
2354 return RESULT_UNSUP_CARD;
2355
2356 return RESULT_UNSUP_HOST;
2357}
2358
/*
 * Table of all test cases.  A case's number in the debugfs interface is
 * its 1-based index here.  prepare/cleanup are optional; run is mandatory.
 */
static const struct mmc_test_case mmc_test_cases[] = {
	/* Basic transfer and data-verification tests */
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	/* Error-injection tests: check reported xfer size on failure */
	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	/* Keep case numbering stable when highmem is not configured. */
	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	/* Performance tests using the dedicated test area */
	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "eMMC hardware reset",
		.run = mmc_test_hw_reset,
	},
};
2687
/* Serializes test runs and all access to the global result list. */
static DEFINE_MUTEX(mmc_test_lock);

/* Saved results for every card tested, protected by mmc_test_lock. */
static LIST_HEAD(mmc_test_result);
2691
/*
 * Run test cases against @test->card with the host claimed.
 * @testcase is 1-based; 0 means "run every case".  Each case's outcome is
 * logged and, when allocation succeeds, also saved to the global result
 * list for later retrieval via debugfs.
 */
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
		struct mmc_test_general_result *gr;

		/* A non-zero testcase selects a single (1-based) case. */
		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		/* Result bookkeeping is best-effort: the test still runs if
		 * this allocation fails. */
		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data what we know already */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		/* Cleanup failure is logged but does not abort the loop. */
		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				pr_info("%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
2786
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002787static void mmc_test_free_result(struct mmc_card *card)
2788{
2789 struct mmc_test_general_result *gr, *grs;
2790
2791 mutex_lock(&mmc_test_lock);
2792
2793 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2794 struct mmc_test_transfer_result *tr, *trs;
2795
2796 if (card && gr->card != card)
2797 continue;
2798
2799 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2800 list_del(&tr->link);
2801 kfree(tr);
2802 }
2803
2804 list_del(&gr->link);
2805 kfree(gr);
2806 }
2807
2808 mutex_unlock(&mmc_test_lock);
2809}
2810
/* All debugfs files created by this driver; protected by mmc_test_lock. */
static LIST_HEAD(mmc_test_file_test);
2812
2813static int mtf_test_show(struct seq_file *sf, void *data)
Pierre Ossman88ae6002007-08-12 14:23:50 +02002814{
Andy Shevchenko130067e2010-09-10 10:10:50 +03002815 struct mmc_card *card = (struct mmc_card *)sf->private;
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002816 struct mmc_test_general_result *gr;
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002817
Pierre Ossman88ae6002007-08-12 14:23:50 +02002818 mutex_lock(&mmc_test_lock);
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002819
2820 list_for_each_entry(gr, &mmc_test_result, link) {
2821 struct mmc_test_transfer_result *tr;
2822
2823 if (gr->card != card)
2824 continue;
2825
Andy Shevchenko130067e2010-09-10 10:10:50 +03002826 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002827
2828 list_for_each_entry(tr, &gr->tr_lst, link) {
Adrian Hunterb6056d12011-02-08 13:41:02 +02002829 seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002830 tr->count, tr->sectors,
2831 (unsigned long)tr->ts.tv_sec,
2832 (unsigned long)tr->ts.tv_nsec,
Adrian Hunterb6056d12011-02-08 13:41:02 +02002833 tr->rate, tr->iops / 100, tr->iops % 100);
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002834 }
2835 }
2836
Pierre Ossman88ae6002007-08-12 14:23:50 +02002837 mutex_unlock(&mmc_test_lock);
2838
Andy Shevchenko130067e2010-09-10 10:10:50 +03002839 return 0;
Pierre Ossman88ae6002007-08-12 14:23:50 +02002840}
2841
Andy Shevchenko130067e2010-09-10 10:10:50 +03002842static int mtf_test_open(struct inode *inode, struct file *file)
Pierre Ossman88ae6002007-08-12 14:23:50 +02002843{
Andy Shevchenko130067e2010-09-10 10:10:50 +03002844 return single_open(file, mtf_test_show, inode->i_private);
2845}
2846
2847static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2848 size_t count, loff_t *pos)
2849{
2850 struct seq_file *sf = (struct seq_file *)file->private_data;
2851 struct mmc_card *card = (struct mmc_card *)sf->private;
Pierre Ossman88ae6002007-08-12 14:23:50 +02002852 struct mmc_test_card *test;
Andy Shevchenko5c25aee2010-09-01 09:26:46 +03002853 long testcase;
Jingoo Han4be70852013-07-19 16:02:43 +09002854 int ret;
Pierre Ossman88ae6002007-08-12 14:23:50 +02002855
Jingoo Han4be70852013-07-19 16:02:43 +09002856 ret = kstrtol_from_user(buf, count, 10, &testcase);
2857 if (ret)
2858 return ret;
Pierre Ossmanfd8c3262008-05-24 22:36:31 +02002859
Pierre Ossman88ae6002007-08-12 14:23:50 +02002860 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2861 if (!test)
2862 return -ENOMEM;
2863
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002864 /*
2865 * Remove all test cases associated with given card. Thus we have only
2866 * actual data of the last run.
2867 */
2868 mmc_test_free_result(card);
2869
Pierre Ossman88ae6002007-08-12 14:23:50 +02002870 test->card = card;
2871
2872 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
Pierre Ossman26610812008-07-04 18:17:13 +02002873#ifdef CONFIG_HIGHMEM
2874 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2875#endif
2876
2877#ifdef CONFIG_HIGHMEM
2878 if (test->buffer && test->highmem) {
2879#else
Pierre Ossman88ae6002007-08-12 14:23:50 +02002880 if (test->buffer) {
Pierre Ossman26610812008-07-04 18:17:13 +02002881#endif
Pierre Ossman88ae6002007-08-12 14:23:50 +02002882 mutex_lock(&mmc_test_lock);
Pierre Ossmanfd8c3262008-05-24 22:36:31 +02002883 mmc_test_run(test, testcase);
Pierre Ossman88ae6002007-08-12 14:23:50 +02002884 mutex_unlock(&mmc_test_lock);
2885 }
2886
Pierre Ossman26610812008-07-04 18:17:13 +02002887#ifdef CONFIG_HIGHMEM
2888 __free_pages(test->highmem, BUFFER_ORDER);
2889#endif
Pierre Ossman88ae6002007-08-12 14:23:50 +02002890 kfree(test->buffer);
2891 kfree(test);
2892
2893 return count;
2894}
2895
/*
 * File operations for the per-card debugfs "test" file: reading dumps
 * stored results (mtf_test_show), writing a test number runs that test
 * (mtf_test_write).
 */
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2903
Per Forlin54f3caf2011-07-01 18:55:25 +02002904static int mtf_testlist_show(struct seq_file *sf, void *data)
2905{
2906 int i;
2907
2908 mutex_lock(&mmc_test_lock);
2909
2910 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
2911 seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
2912
2913 mutex_unlock(&mmc_test_lock);
2914
2915 return 0;
2916}
2917
2918static int mtf_testlist_open(struct inode *inode, struct file *file)
2919{
2920 return single_open(file, mtf_testlist_show, inode->i_private);
2921}
2922
/* File operations for the read-only debugfs "testlist" file. */
static const struct file_operations mmc_test_fops_testlist = {
	.open		= mtf_testlist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2929
Andy Shevchenkod5a5bd12011-07-22 16:13:36 +03002930static void mmc_test_free_dbgfs_file(struct mmc_card *card)
Andy Shevchenko130067e2010-09-10 10:10:50 +03002931{
2932 struct mmc_test_dbgfs_file *df, *dfs;
2933
2934 mutex_lock(&mmc_test_lock);
2935
2936 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2937 if (card && df->card != card)
2938 continue;
2939 debugfs_remove(df->file);
2940 list_del(&df->link);
2941 kfree(df);
2942 }
2943
2944 mutex_unlock(&mmc_test_lock);
2945}
2946
Andy Shevchenkod5a5bd12011-07-22 16:13:36 +03002947static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
Al Virof4ae40a2011-07-24 04:33:43 -04002948 const char *name, umode_t mode, const struct file_operations *fops)
Andy Shevchenko130067e2010-09-10 10:10:50 +03002949{
2950 struct dentry *file = NULL;
2951 struct mmc_test_dbgfs_file *df;
Andy Shevchenko130067e2010-09-10 10:10:50 +03002952
2953 if (card->debugfs_root)
Andy Shevchenkod5a5bd12011-07-22 16:13:36 +03002954 file = debugfs_create_file(name, mode, card->debugfs_root,
2955 card, fops);
Andy Shevchenko130067e2010-09-10 10:10:50 +03002956
2957 if (IS_ERR_OR_NULL(file)) {
2958 dev_err(&card->dev,
Andy Shevchenkod5a5bd12011-07-22 16:13:36 +03002959 "Can't create %s. Perhaps debugfs is disabled.\n",
2960 name);
2961 return -ENODEV;
Andy Shevchenko130067e2010-09-10 10:10:50 +03002962 }
2963
2964 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2965 if (!df) {
2966 debugfs_remove(file);
2967 dev_err(&card->dev,
2968 "Can't allocate memory for internal usage.\n");
Andy Shevchenkod5a5bd12011-07-22 16:13:36 +03002969 return -ENOMEM;
Andy Shevchenko130067e2010-09-10 10:10:50 +03002970 }
2971
2972 df->card = card;
2973 df->file = file;
2974
2975 list_add(&df->link, &mmc_test_file_test);
Andy Shevchenkod5a5bd12011-07-22 16:13:36 +03002976 return 0;
2977}
2978
2979static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2980{
2981 int ret;
2982
2983 mutex_lock(&mmc_test_lock);
2984
2985 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2986 &mmc_test_fops_test);
2987 if (ret)
2988 goto err;
2989
2990 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2991 &mmc_test_fops_testlist);
2992 if (ret)
2993 goto err;
Andy Shevchenko130067e2010-09-10 10:10:50 +03002994
2995err:
2996 mutex_unlock(&mmc_test_lock);
2997
2998 return ret;
2999}
Pierre Ossman88ae6002007-08-12 14:23:50 +02003000
Ulf Hansson6685ac62014-10-06 13:51:40 +02003001static int mmc_test_probe(struct device *dev)
Pierre Ossman88ae6002007-08-12 14:23:50 +02003002{
Ulf Hansson6685ac62014-10-06 13:51:40 +02003003 struct mmc_card *card = mmc_dev_to_card(dev);
Pierre Ossman88ae6002007-08-12 14:23:50 +02003004 int ret;
3005
Andy Shevchenko63be54c2010-09-01 09:26:45 +03003006 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
Pierre Ossman0121a982008-06-28 17:51:27 +02003007 return -ENODEV;
3008
Andy Shevchenkod5a5bd12011-07-22 16:13:36 +03003009 ret = mmc_test_register_dbgfs_file(card);
Pierre Ossman88ae6002007-08-12 14:23:50 +02003010 if (ret)
3011 return ret;
3012
Pierre Ossman60c9c7b2008-07-22 14:38:35 +02003013 dev_info(&card->dev, "Card claimed for testing.\n");
3014
Pierre Ossman88ae6002007-08-12 14:23:50 +02003015 return 0;
3016}
3017
/*
 * Bus remove callback: drop this card's stored results, then tear down
 * its debugfs files.
 */
static int mmc_test_remove(struct device *dev)
{
	struct mmc_card *c = mmc_dev_to_card(dev);

	mmc_test_free_result(c);
	mmc_test_free_dbgfs_file(c);

	return 0;
}
3027
/*
 * Intentionally empty shutdown callback.
 * NOTE(review): presumably present so the driver core has an explicit
 * hook and no bus-level default runs at system shutdown - confirm
 * against the driver-core/mmc bus code.
 */
static void mmc_test_shutdown(struct device *dev)
{
}
3031
/* Driver registered on the MMC bus; wires up the callbacks above. */
static struct device_driver mmc_driver = {
	.name	= "mmc_test",
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
	.shutdown	= mmc_test_shutdown,
};
3038
/* Module entry point: register the test driver with the MMC core. */
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
3043
/*
 * Module exit: release all results and debugfs files (NULL means "all
 * cards"), then unregister the driver.
 */
static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}
3052
3053module_init(mmc_test_init);
3054module_exit(mmc_test_exit);
3055
3056MODULE_LICENSE("GPL");
3057MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3058MODULE_AUTHOR("Pierre Ossman");