blob: 2307d7aa9ea777d25518892e6d9dba89b5598f3f [file] [log] [blame]
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/* MMC block test */
15
16#include <linux/module.h>
17#include <linux/blkdev.h>
18#include <linux/debugfs.h>
19#include <linux/mmc/card.h>
20#include <linux/mmc/host.h>
21#include <linux/delay.h>
22#include <linux/test-iosched.h>
Lee Susmanf18263a2012-10-24 14:14:37 +020023#include <linux/jiffies.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020024#include "queue.h"
Yaniv Gardie9214c82012-10-18 13:58:18 +020025#include <linux/mmc/mmc.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020026
#define MODULE_NAME "mmc_block_test"
/* Upper bound (bytes) on the sector span a test request may be placed in */
#define TEST_MAX_SECTOR_RANGE		(600*1024*1024) /* 600 MB */
#define TEST_MAX_BIOS_PER_REQ		120
/* "Packed" flag, bit 30 of a CMD23 argument (see brq->sbc.arg users below) */
#define CMD23_PACKED_BIT	(1 << 30)
/* Multiplier/increment constants for pseudo_random_seed() */
#define LARGE_PRIME_1	1103515367
#define LARGE_PRIME_2	35757
/* Field masks for word 0 of the packed write header (packed_cmd_hdr[0]) */
#define PACKED_HDR_VER_MASK 0x000000FF
#define PACKED_HDR_RW_MASK 0x0000FF00
#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
#define SECTOR_SIZE 512
#define NUM_OF_SECTORS_PER_BIO	((BIO_U32_SIZE * 4) / SECTOR_SIZE)
#define BIO_TO_SECTOR(x)	(x * NUM_OF_SECTORS_PER_BIO)
/* the desired long test size to be written or read */
#define LONG_TEST_MAX_NUM_BYTES (50*1024*1024) /* 50MB */
/* request queue limitation is 128 requests, and we leave 10 spare requests */
#define TEST_MAX_REQUESTS 118
#define LONG_TEST_MAX_NUM_REQS	(LONG_TEST_MAX_NUM_BYTES / \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* this doesn't allow the test requests num to be greater than the maximum */
#define LONG_TEST_ACTUAL_NUM_REQS  \
			((TEST_MAX_REQUESTS < LONG_TEST_MAX_NUM_REQS) ? \
				TEST_MAX_REQUESTS : LONG_TEST_MAX_NUM_REQS)
#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
/* actual number of bytes in test */
#define LONG_TEST_ACTUAL_BYTE_NUM  (LONG_TEST_ACTUAL_NUM_REQS * \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* actual number of MiB in test multiplied by 10, for single digit precision*/
#define LONG_TEST_ACTUAL_MB_NUM_X_10 ((LONG_TEST_ACTUAL_BYTE_NUM * 10) / \
				      (1024 * 1024))
/* extract integer value */
#define LONG_TEST_SIZE_INTEGER (LONG_TEST_ACTUAL_MB_NUM_X_10 / 10)
/* and calculate the MiB value fraction */
#define LONG_TEST_SIZE_FRACTION (LONG_TEST_ACTUAL_MB_NUM_X_10 - \
		(LONG_TEST_SIZE_INTEGER * 10))

/* Logging helpers: prefix every message with the module name */
#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)

/* NOTE(review): timeout units presumably msec - confirm against users */
#define SANITIZE_TEST_TIMEOUT 240000
#define TEST_REQUEST_NUM_OF_BIOS	3


/* True (nonzero) when any observed BKOPS counter differs from the expected */
#define CHECK_BKOPS_STATS(stats, exp_bkops, exp_hpi, exp_suspend)	\
				   ((stats.bkops != exp_bkops) ||	\
				    (stats.hpi != exp_hpi) ||		\
				    (stats.suspend != exp_suspend))
#define BKOPS_TEST_TIMEOUT 60000
Maya Erezddc55732012-10-17 09:51:01 +020076
/* Selects whether a test run uses fixed or pseudo-random request sizing */
enum is_random {
	NON_RANDOM_TEST,
	RANDOM_TEST,
};
81
/*
 * All testcases supported by this module. Testcases belonging to the same
 * group are kept contiguous and delimited by the group's MIN/MAX aliases.
 */
enum mmc_block_test_testcases {
	/* Start of send write packing test group */
	SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_FLUSH,
	TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_EMPTY_QUEUE,
	TEST_STOP_DUE_TO_MAX_REQ_NUM,
	TEST_STOP_DUE_TO_THRESHOLD,
	SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,

	/* Start of err check test group */
	ERR_CHECK_MIN_TESTCASE,
	TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
	TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
	TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
	TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
	TEST_RET_PARTIAL_MAX_FAIL_IDX,
	TEST_RET_RETRY,
	TEST_RET_CMD_ERR,
	TEST_RET_DATA_ERR,
	ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,

	/* Start of send invalid test group */
	INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_WRONG_WRITE_CODE,
	TEST_HDR_INVALID_RW_CODE,
	TEST_HDR_DIFFERENT_ADDRESSES,
	TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
	TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
	TEST_HDR_CMD23_PACKED_BIT_SET,
	TEST_CMD23_MAX_PACKED_WRITES,
	TEST_CMD23_ZERO_PACKED_WRITES,
	TEST_CMD23_PACKED_BIT_UNSET,
	TEST_CMD23_REL_WR_BIT_SET,
	TEST_CMD23_BITS_16TO29_SET,
	TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
	INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,

	/*
	 * Start of packing control test group.
	 * in these next testcases the abbreviation FB = followed by
	 */
	PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
				PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_N_OVER_TRIGGER,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
	TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
	TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
	TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
	TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
	TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
	PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,

	/* Standalone testcase - not part of any MIN/MAX delimited group */
	TEST_WRITE_DISCARD_SANITIZE_READ,

	/* Start of bkops test group */
	BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1 = BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1_HPI,
	BKOPS_CANCEL_DELAYED_WORK,
	BKOPS_URGENT_LEVEL_2,
	BKOPS_URGENT_LEVEL_2_TWO_REQS,
	BKOPS_URGENT_LEVEL_3,
	BKOPS_MAX_TESTCASE = BKOPS_URGENT_LEVEL_3,

	/* Long sequential throughput testcases */
	TEST_LONG_SEQUENTIAL_READ,
	TEST_LONG_SEQUENTIAL_WRITE,
};
157
/*
 * The group a testcase belongs to; stored in mbtd->test_group and used to
 * select group-wide behavior for the current test run.
 */
enum mmc_block_test_group {
	TEST_NO_GROUP,
	TEST_GENERAL_GROUP,
	TEST_SEND_WRITE_PACKING_GROUP,
	TEST_ERR_CHECK_GROUP,
	TEST_SEND_INVALID_GROUP,
	TEST_PACKING_CONTROL_GROUP,
	TEST_BKOPS_GROUP,
};
167
/* Progress marker for the multi-stage BKOPs tests (see mbtd->bkops_stage) */
enum bkops_test_stages {
	BKOPS_STAGE_1,
	BKOPS_STAGE_2,
	BKOPS_STAGE_3,
	BKOPS_STAGE_4,
};
174
/*
 * debugfs entries exposed by this module - one trigger file per test
 * (or test family) plus the user-settable random seed knob.
 */
struct mmc_block_test_debug {
	struct dentry *send_write_packing_test;
	struct dentry *err_check_test;
	struct dentry *send_invalid_packed_test;
	/* Knob: user-supplied seed for the pseudo-random request sizing */
	struct dentry *random_test_seed;
	struct dentry *packing_control_test;
	struct dentry *discard_sanitize_test;
	struct dentry *bkops_test;
	struct dentry *long_sequential_read_test;
	struct dentry *long_sequential_write_test;
};
186
/* Module-wide state shared by all testcases (singleton, see mbtd below) */
struct mmc_block_test_data {
	/* The number of write requests that the test will issue */
	int num_requests;
	/* The expected write packing statistics for the current test */
	struct mmc_wr_pack_stats exp_packed_stats;
	/*
	 * A user-defined seed for random choices of number of bios written in
	 * a request, and of number of requests issued in a test
	 * This field is randomly updated after each use
	 */
	unsigned int random_test_seed;
	/* A retry counter used in err_check tests */
	int err_check_counter;
	/* Can be one of the values of enum test_group */
	enum mmc_block_test_group test_group;
	/*
	 * Indicates if the current testcase is running with random values of
	 * num_requests and num_bios (in each request)
	 */
	int is_random;
	/* Data structure for debugfs dentries */
	struct mmc_block_test_debug debug;
	/*
	 * Data structure containing individual test information, including
	 * self-defined specific data
	 */
	struct test_info test_info;
	/* mmc block device test */
	struct blk_dev_test_type bdt;
	/* Current BKOPs test stage */
	enum bkops_test_stages bkops_stage;
	/* A wait queue for BKOPs tests */
	wait_queue_head_t bkops_wait_q;
};
221
/* Singleton test state; set up elsewhere in this file before tests run */
static struct mmc_block_test_data *mbtd;
223
/*
 * A callback assigned to the packed_test_fn field.
 * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
 * Here we alter the packed header or CMD23 in order to send an invalid
 * packed command to the card, according to the current testcase.
 */
static void test_invalid_packed_cmd(struct request_queue *q,
				struct mmc_queue_req *mqrq)
{
	struct mmc_queue *mq = q->queuedata;
	u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
	struct request *req = mqrq->req;
	struct request *second_rq;
	struct test_request *test_rq;
	struct mmc_blk_request *brq = &mqrq->brq;
	int num_requests;
	int max_packed_reqs;

	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return;
	}

	test_rq = (struct test_request *)req->elv.priv[0];
	if (!test_rq) {
		test_pr_err("%s: NULL test_rq", __func__);
		return;
	}
	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	switch (mbtd->test_info.testcase) {
	case TEST_HDR_INVALID_VERSION:
		test_pr_info("%s: set invalid header version", __func__);
		/* Put 0 in header version field (1 byte, offset 0 in header) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
		break;
	case TEST_HDR_WRONG_WRITE_CODE:
		test_pr_info("%s: wrong write code", __func__);
		/* Set R/W field with R value (1 byte, offset 1 in header) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
		break;
	case TEST_HDR_INVALID_RW_CODE:
		test_pr_info("%s: invalid r/w code", __func__);
		/* Set R/W field with invalid value */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
		break;
	case TEST_HDR_DIFFERENT_ADDRESSES:
		test_pr_info("%s: different addresses", __func__);
		second_rq = list_entry(req->queuelist.next, struct request,
				queuelist);
		test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
			      __func__, (long)req->__sector,
			     (long)second_rq->__sector);
		/*
		 * Put start sector of second write request in the first write
		 * request's cmd25 argument in the packed header
		 */
		packed_cmd_hdr[3] = second_rq->__sector;
		break;
	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
		test_pr_info("%s: request num smaller than actual" , __func__);
		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
									>> 16;
		/* num of entries is decremented by 1 */
		num_requests = (num_requests - 1) << 16;
		/*
		 * Set number of requests field in packed write header to be
		 * smaller than the actual number (1 byte, offset 2 in header)
		 */
		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
				~PACKED_HDR_NUM_REQS_MASK) + num_requests;
		break;
	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
		test_pr_info("%s: request num larger than actual" , __func__);
		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
									>> 16;
		/* num of entries is incremented by 1 */
		num_requests = (num_requests + 1) << 16;
		/*
		 * Set number of requests field in packed write header to be
		 * larger than the actual number (1 byte, offset 2 in header).
		 */
		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
				~PACKED_HDR_NUM_REQS_MASK) + num_requests;
		break;
	case TEST_HDR_CMD23_PACKED_BIT_SET:
		test_pr_info("%s: header CMD23 packed bit set" , __func__);
		/*
		 * Set packed bit (bit 30) in cmd23 argument of first and second
		 * write requests in packed write header.
		 * These are located at bytes 2 and 4 in packed write header
		 */
		packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
		packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
		break;
	case TEST_CMD23_MAX_PACKED_WRITES:
		test_pr_info("%s: CMD23 request num > max_packed_reqs",
			      __func__);
		/*
		 * Set the individual packed cmd23 request num to
		 * max_packed_reqs + 1
		 */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
		break;
	case TEST_CMD23_ZERO_PACKED_WRITES:
		test_pr_info("%s: CMD23 request num = 0", __func__);
		/* Set the individual packed cmd23 request num to zero */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED;
		break;
	case TEST_CMD23_PACKED_BIT_UNSET:
		test_pr_info("%s: CMD23 packed bit unset", __func__);
		/*
		 * Set the individual packed cmd23 packed bit to 0,
		 * although there is a packed write request
		 */
		brq->sbc.arg &= ~CMD23_PACKED_BIT;
		break;
	case TEST_CMD23_REL_WR_BIT_SET:
		test_pr_info("%s: CMD23 REL WR bit set", __func__);
		/* Set the individual packed cmd23 reliable write bit */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
		break;
	case TEST_CMD23_BITS_16TO29_SET:
		test_pr_info("%s: CMD23 bits [16-29] set", __func__);
		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
			PACKED_HDR_BITS_16_TO_29_SET;
		break;
	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
		test_pr_info("%s: CMD23 hdr not in block count", __func__);
		/* Drop the header block from the CMD23 block count (writes) */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
		((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
		break;
	}
}
364
/*
 * A callback assigned to the err_check_fn field of the mmc_request by the
 * MMC/card/block layer.
 * Called upon request completion by the MMC/core layer.
 * Here we emulate an error return value from the card, per the current
 * testcase; several cases clear err_check_fn so that subsequent retries
 * complete normally.
 */
static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request_queue *req_q = test_iosched_get_req_queue();
	struct mmc_queue *mq;
	int max_packed_reqs;
	int ret = 0;
	struct mmc_blk_request *brq;

	if (req_q)
		mq = req_q->queuedata;
	else {
		test_pr_err("%s: NULL request_queue", __func__);
		return 0;
	}

	if (!mq) {
		test_pr_err("%s: %s: NULL mq", __func__,
			mmc_hostname(card->host));
		return 0;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	if (!mq_rq) {
		test_pr_err("%s: %s: NULL mq_rq", __func__,
			mmc_hostname(card->host));
		return 0;
	}
	brq = &mq_rq->brq;

	switch (mbtd->test_info.testcase) {
	case TEST_RET_ABORT:
		test_pr_info("%s: return abort", __func__);
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
		test_pr_info("%s: return partial followed by success",
			      __func__);
		/*
		 * Since in this testcase num_requests is always >= 2,
		 * we can be sure that packed_fail_idx is always >= 1
		 */
		mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
		test_pr_info("%s: packed_fail_idx = %d"
			, __func__, mq_rq->packed_fail_idx);
		/* Clear the callback so the retried half succeeds */
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
		if (!mbtd->err_check_counter) {
			/* First completion: report a partial failure */
			test_pr_info("%s: return partial followed by abort",
				      __func__);
			mbtd->err_check_counter++;
			/*
			 * Since in this testcase num_requests is always >= 3,
			 * we have that packed_fail_idx is always >= 1
			 */
			mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
			test_pr_info("%s: packed_fail_idx = %d"
				, __func__, mq_rq->packed_fail_idx);
			ret = MMC_BLK_PARTIAL;
			break;
		}
		/* Second completion: abort and stop intercepting */
		mbtd->err_check_counter = 0;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
		test_pr_info("%s: return partial multiple until success",
			     __func__);
		if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
			/* Enough partials reported - let the rest succeed */
			mq->err_check_fn = NULL;
			mbtd->err_check_counter = 0;
			ret = MMC_BLK_PARTIAL;
			break;
		}
		mq_rq->packed_fail_idx = 1;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
		test_pr_info("%s: return partial max fail_idx", __func__);
		mq_rq->packed_fail_idx = max_packed_reqs - 1;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_RETRY:
		test_pr_info("%s: return retry", __func__);
		ret = MMC_BLK_RETRY;
		break;
	case TEST_RET_CMD_ERR:
		test_pr_info("%s: return cmd err", __func__);
		ret = MMC_BLK_CMD_ERR;
		break;
	case TEST_RET_DATA_ERR:
		test_pr_info("%s: return data err", __func__);
		ret = MMC_BLK_DATA_ERR;
		break;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* Fake a card exception event on the first completion only */
		if (mbtd->err_check_counter++ == 0) {
			test_pr_info("%s: simulate an exception from the card",
				     __func__);
			brq->cmd.resp[0] |= R1_EXCEPTION_EVENT;
		}
		mq->err_check_fn = NULL;
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
	}

	return ret;
}
487
488/*
489 * This is a specific implementation for the get_test_case_str_fn function
490 * pointer in the test_info data structure. Given a valid test_data instance,
491 * the function returns a string resembling the test name, based on the testcase
492 */
493static char *get_test_case_str(struct test_data *td)
494{
495 if (!td) {
496 test_pr_err("%s: NULL td", __func__);
497 return NULL;
498 }
499
500 switch (td->test_info.testcase) {
501 case TEST_STOP_DUE_TO_FLUSH:
502 return "Test stop due to flush";
503 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
504 return "Test stop due to flush after max-1 reqs";
505 case TEST_STOP_DUE_TO_READ:
506 return "Test stop due to read";
507 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
508 return "Test stop due to read after max-1 reqs";
509 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
510 return "Test stop due to empty queue";
511 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
512 return "Test stop due to max req num";
513 case TEST_STOP_DUE_TO_THRESHOLD:
514 return "Test stop due to exceeding threshold";
515 case TEST_RET_ABORT:
516 return "Test err_check return abort";
517 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
518 return "Test err_check return partial followed by success";
519 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
520 return "Test err_check return partial followed by abort";
521 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
522 return "Test err_check return partial multiple until success";
523 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
524 return "Test err_check return partial max fail index";
525 case TEST_RET_RETRY:
526 return "Test err_check return retry";
527 case TEST_RET_CMD_ERR:
528 return "Test err_check return cmd error";
529 case TEST_RET_DATA_ERR:
530 return "Test err_check return data error";
531 case TEST_HDR_INVALID_VERSION:
532 return "Test invalid - wrong header version";
533 case TEST_HDR_WRONG_WRITE_CODE:
534 return "Test invalid - wrong write code";
535 case TEST_HDR_INVALID_RW_CODE:
536 return "Test invalid - wrong R/W code";
537 case TEST_HDR_DIFFERENT_ADDRESSES:
538 return "Test invalid - header different addresses";
539 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
540 return "Test invalid - header req num smaller than actual";
541 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
542 return "Test invalid - header req num larger than actual";
543 case TEST_HDR_CMD23_PACKED_BIT_SET:
544 return "Test invalid - header cmd23 packed bit set";
545 case TEST_CMD23_MAX_PACKED_WRITES:
546 return "Test invalid - cmd23 max packed writes";
547 case TEST_CMD23_ZERO_PACKED_WRITES:
548 return "Test invalid - cmd23 zero packed writes";
549 case TEST_CMD23_PACKED_BIT_UNSET:
550 return "Test invalid - cmd23 packed bit unset";
551 case TEST_CMD23_REL_WR_BIT_SET:
552 return "Test invalid - cmd23 rel wr bit set";
553 case TEST_CMD23_BITS_16TO29_SET:
554 return "Test invalid - cmd23 bits [16-29] set";
555 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
556 return "Test invalid - cmd23 header block not in count";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200557 case TEST_PACKING_EXP_N_OVER_TRIGGER:
558 return "\nTest packing control - pack n";
559 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
560 return "\nTest packing control - pack n followed by read";
561 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
562 return "\nTest packing control - pack n followed by flush";
563 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
564 return "\nTest packing control - pack one followed by read";
565 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
566 return "\nTest packing control - pack threshold";
567 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
568 return "\nTest packing control - no packing";
569 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
570 return "\nTest packing control - no packing, trigger requests";
571 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
572 return "\nTest packing control - no pack, trigger-read-trigger";
573 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
574 return "\nTest packing control- no pack, trigger-flush-trigger";
575 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
576 return "\nTest packing control - mix: pack -> no pack -> pack";
577 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
578 return "\nTest packing control - mix: no pack->pack->no pack";
Maya Erezddc55732012-10-17 09:51:01 +0200579 case TEST_WRITE_DISCARD_SANITIZE_READ:
580 return "\nTest write, discard, sanitize";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200581 case BKOPS_DELAYED_WORK_LEVEL_1:
582 return "\nTest delayed work BKOPS level 1";
583 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
584 return "\nTest delayed work BKOPS level 1 with HPI";
585 case BKOPS_CANCEL_DELAYED_WORK:
586 return "\nTest cancel delayed BKOPS work";
587 case BKOPS_URGENT_LEVEL_2:
588 return "\nTest urgent BKOPS level 2";
589 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
590 return "\nTest urgent BKOPS level 2, followed by a request";
591 case BKOPS_URGENT_LEVEL_3:
592 return "\nTest urgent BKOPS level 3";
Lee Susmanf18263a2012-10-24 14:14:37 +0200593 case TEST_LONG_SEQUENTIAL_READ:
594 return "Test long sequential read";
Lee Susmana35ae6e2012-10-25 16:06:07 +0200595 case TEST_LONG_SEQUENTIAL_WRITE:
596 return "Test long sequential write";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200597 default:
598 return "Unknown testcase";
599 }
600
601 return NULL;
602}
603
604/*
605 * Compare individual testcase's statistics to the expected statistics:
606 * Compare stop reason and number of packing events
607 */
608static int check_wr_packing_statistics(struct test_data *td)
609{
610 struct mmc_wr_pack_stats *mmc_packed_stats;
611 struct mmc_queue *mq = td->req_q->queuedata;
612 int max_packed_reqs = mq->card->ext_csd.max_packed_writes;
613 int i;
614 struct mmc_card *card = mq->card;
615 struct mmc_wr_pack_stats expected_stats;
616 int *stop_reason;
617 int ret = 0;
618
619 if (!mq) {
620 test_pr_err("%s: NULL mq", __func__);
621 return -EINVAL;
622 }
623
624 expected_stats = mbtd->exp_packed_stats;
625
626 mmc_packed_stats = mmc_blk_get_packed_statistics(card);
627 if (!mmc_packed_stats) {
628 test_pr_err("%s: NULL mmc_packed_stats", __func__);
629 return -EINVAL;
630 }
631
632 if (!mmc_packed_stats->packing_events) {
633 test_pr_err("%s: NULL packing_events", __func__);
634 return -EINVAL;
635 }
636
637 spin_lock(&mmc_packed_stats->lock);
638
639 if (!mmc_packed_stats->enabled) {
640 test_pr_err("%s write packing statistics are not enabled",
641 __func__);
642 ret = -EINVAL;
643 goto exit_err;
644 }
645
646 stop_reason = mmc_packed_stats->pack_stop_reason;
647
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200648 for (i = 1; i <= max_packed_reqs; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200649 if (mmc_packed_stats->packing_events[i] !=
650 expected_stats.packing_events[i]) {
651 test_pr_err(
652 "%s: Wrong pack stats in index %d, got %d, expected %d",
653 __func__, i, mmc_packed_stats->packing_events[i],
654 expected_stats.packing_events[i]);
655 if (td->fs_wr_reqs_during_test)
656 goto cancel_round;
657 ret = -EINVAL;
658 goto exit_err;
659 }
660 }
661
662 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
663 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
664 test_pr_err(
665 "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
666 __func__, stop_reason[EXCEEDS_SEGMENTS],
667 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
668 if (td->fs_wr_reqs_during_test)
669 goto cancel_round;
670 ret = -EINVAL;
671 goto exit_err;
672 }
673
674 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
675 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
676 test_pr_err(
677 "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
678 __func__, stop_reason[EXCEEDS_SECTORS],
679 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
680 if (td->fs_wr_reqs_during_test)
681 goto cancel_round;
682 ret = -EINVAL;
683 goto exit_err;
684 }
685
686 if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
687 expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
688 test_pr_err(
689 "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
690 __func__, stop_reason[WRONG_DATA_DIR],
691 expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
692 if (td->fs_wr_reqs_during_test)
693 goto cancel_round;
694 ret = -EINVAL;
695 goto exit_err;
696 }
697
698 if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
699 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
700 test_pr_err(
701 "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
702 __func__, stop_reason[FLUSH_OR_DISCARD],
703 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
704 if (td->fs_wr_reqs_during_test)
705 goto cancel_round;
706 ret = -EINVAL;
707 goto exit_err;
708 }
709
710 if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
711 expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
712 test_pr_err(
713 "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
714 __func__, stop_reason[EMPTY_QUEUE],
715 expected_stats.pack_stop_reason[EMPTY_QUEUE]);
716 if (td->fs_wr_reqs_during_test)
717 goto cancel_round;
718 ret = -EINVAL;
719 goto exit_err;
720 }
721
722 if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
723 expected_stats.pack_stop_reason[REL_WRITE]) {
724 test_pr_err(
725 "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
726 __func__, stop_reason[REL_WRITE],
727 expected_stats.pack_stop_reason[REL_WRITE]);
728 if (td->fs_wr_reqs_during_test)
729 goto cancel_round;
730 ret = -EINVAL;
731 goto exit_err;
732 }
733
734exit_err:
735 spin_unlock(&mmc_packed_stats->lock);
736 if (ret && mmc_packed_stats->enabled)
737 print_mmc_packing_stats(card);
738 return ret;
739cancel_round:
740 spin_unlock(&mmc_packed_stats->lock);
741 test_iosched_set_ignore_round(true);
742 return 0;
743}
744
745/*
746 * Pseudo-randomly choose a seed based on the last seed, and update it in
747 * seed_number. then return seed_number (mod max_val), or min_val.
748 */
749static unsigned int pseudo_random_seed(unsigned int *seed_number,
750 unsigned int min_val,
751 unsigned int max_val)
752{
753 int ret = 0;
754
755 if (!seed_number)
756 return 0;
757
758 *seed_number = ((unsigned int)(((unsigned long)*seed_number *
759 (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
760 ret = (unsigned int)((*seed_number) % max_val);
761
762 return (ret > min_val ? ret : min_val);
763}
764
765/*
766 * Given a pseudo-random seed, find a pseudo-random num_of_bios.
767 * Make sure that num_of_bios is not larger than TEST_MAX_SECTOR_RANGE
768 */
769static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
770 unsigned int *num_of_bios)
771{
772 do {
773 *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
774 TEST_MAX_BIOS_PER_REQ);
775 if (!(*num_of_bios))
776 *num_of_bios = 1;
777 } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
778}
779
780/* Add a single read request to the given td's request queue */
781static int prepare_request_add_read(struct test_data *td)
782{
783 int ret;
784 int start_sec;
785
786 if (td)
787 start_sec = td->start_sector;
788 else {
789 test_pr_err("%s: NULL td", __func__);
790 return 0;
791 }
792
793 test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
794 td->wr_rd_next_req_id);
795
796 ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
797 TEST_PATTERN_5A, NULL);
798 if (ret) {
799 test_pr_err("%s: failed to add a read request", __func__);
800 return ret;
801 }
802
803 return 0;
804}
805
806/* Add a single flush request to the given td's request queue */
807static int prepare_request_add_flush(struct test_data *td)
808{
809 int ret;
810
811 if (!td) {
812 test_pr_err("%s: NULL td", __func__);
813 return 0;
814 }
815
816 test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
817 td->unique_next_req_id);
818 ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
819 0, 0, NULL);
820 if (ret) {
821 test_pr_err("%s: failed to add a flush request", __func__);
822 return ret;
823 }
824
825 return ret;
826}
827
828/*
829 * Add num_requets amount of write requests to the given td's request queue.
830 * If random test mode is chosen we pseudo-randomly choose the number of bios
831 * for each write request, otherwise add between 1 to 5 bio per request.
832 */
833static int prepare_request_add_write_reqs(struct test_data *td,
834 int num_requests, int is_err_expected,
835 int is_random)
836{
837 int i;
838 unsigned int start_sec;
839 int num_bios;
840 int ret = 0;
841 unsigned int *bio_seed = &mbtd->random_test_seed;
842
843 if (td)
844 start_sec = td->start_sector;
845 else {
846 test_pr_err("%s: NULL td", __func__);
847 return ret;
848 }
849
850 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
851 num_requests, td->wr_rd_next_req_id);
852
Lee Susmanf18263a2012-10-24 14:14:37 +0200853 for (i = 1 ; i <= num_requests ; i++) {
854 start_sec =
855 td->start_sector + sizeof(int) *
856 BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200857 if (is_random)
858 pseudo_rnd_num_of_bios(bio_seed, &num_bios);
859 else
860 /*
861 * For the non-random case, give num_bios a value
862 * between 1 and 5, to keep a small number of BIOs
863 */
864 num_bios = (i%5)+1;
865
866 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
867 start_sec, num_bios, TEST_PATTERN_5A, NULL);
868
869 if (ret) {
870 test_pr_err("%s: failed to add a write request",
871 __func__);
872 return ret;
873 }
874 }
875 return 0;
876}
877
878/*
879 * Prepare the write, read and flush requests for a generic packed commands
880 * testcase
881 */
882static int prepare_packed_requests(struct test_data *td, int is_err_expected,
883 int num_requests, int is_random)
884{
885 int ret = 0;
886 struct mmc_queue *mq;
887 int max_packed_reqs;
888 struct request_queue *req_q;
889
890 if (!td) {
891 pr_err("%s: NULL td", __func__);
892 return -EINVAL;
893 }
894
895 req_q = td->req_q;
896
897 if (!req_q) {
898 pr_err("%s: NULL request queue", __func__);
899 return -EINVAL;
900 }
901
902 mq = req_q->queuedata;
903 if (!mq) {
904 test_pr_err("%s: NULL mq", __func__);
905 return -EINVAL;
906 }
907
908 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
909
910 if (mbtd->random_test_seed <= 0) {
911 mbtd->random_test_seed =
912 (unsigned int)(get_jiffies_64() & 0xFFFF);
913 test_pr_info("%s: got seed from jiffies %d",
914 __func__, mbtd->random_test_seed);
915 }
916
917 mmc_blk_init_packed_statistics(mq->card);
918
919 ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
920 is_random);
921 if (ret)
922 return ret;
923
924 /* Avoid memory corruption in upcoming stats set */
925 if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
926 num_requests--;
927
928 memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
929 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
930 memset(mbtd->exp_packed_stats.packing_events, 0,
931 (max_packed_reqs + 1) * sizeof(u32));
932 if (num_requests <= max_packed_reqs)
933 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
934
935 switch (td->test_info.testcase) {
936 case TEST_STOP_DUE_TO_FLUSH:
937 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
938 ret = prepare_request_add_flush(td);
939 if (ret)
940 return ret;
941
942 mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
943 break;
944 case TEST_STOP_DUE_TO_READ:
945 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
946 ret = prepare_request_add_read(td);
947 if (ret)
948 return ret;
949
950 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
951 break;
952 case TEST_STOP_DUE_TO_THRESHOLD:
953 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
954 mbtd->exp_packed_stats.packing_events[1] = 1;
955 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
956 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
957 break;
958 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
959 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
960 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
961 break;
962 default:
963 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
964 }
965 mbtd->num_requests = num_requests;
966
967 return 0;
968}
969
/*
 * Prepare the write, read and flush requests for the packing control
 * testcases.
 *
 * Queues the test requests and fills mbtd->exp_packed_stats with the
 * expected pack-stop reasons and packing events for each testcase.
 * num_packed_reqs is the number of requests issued beyond the packing
 * trigger (mq->num_wr_reqs_to_start_packing), i.e. those expected to be
 * packed together.
 *
 * Returns 0 on success or a negative error code.
 */
static int prepare_packed_control_tests_requests(struct test_data *td,
			int is_err_expected, int num_requests, int is_random)
{
	int ret = 0;
	struct mmc_queue *mq;
	int max_packed_reqs;
	int temp_num_req = num_requests;
	struct request_queue *req_q;
	int test_packed_trigger;
	int num_packed_reqs;

	if (!td) {
		test_pr_err("%s: NULL td\n", __func__);
		return -EINVAL;
	}

	req_q = td->req_q;

	if (!req_q) {
		test_pr_err("%s: NULL request queue\n", __func__);
		return -EINVAL;
	}

	mq = req_q->queuedata;
	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
	num_packed_reqs = num_requests - test_packed_trigger;

	/* Lazily derive a random seed when none was supplied via debugfs */
	if (mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			__func__, mbtd->random_test_seed);
	}

	mmc_blk_init_packed_statistics(mq->card);

	/*
	 * For the no-packed/packed/no-packed mix, the first batch must stay
	 * below the trigger; the full count is restored after the writes
	 * are queued (temp_num_req keeps the original value).
	 */
	if (td->test_info.testcase ==
			TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
		temp_num_req = num_requests;
		num_requests = test_packed_trigger - 1;
	}

	/* Verify that the packing is disabled before starting the test */
	mq->wr_packing_enabled = false;
	mq->num_of_potential_packed_wr_reqs = 0;

	/*
	 * For the packed/no-packed/packed mix, start the test with packing
	 * already enabled and past the trigger.
	 */
	if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
		mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
		mq->wr_packing_enabled = true;
		num_requests = test_packed_trigger + 2;
	}

	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
					     is_random);
	if (ret)
		goto exit;

	if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
		num_requests = temp_num_req;

	/* Reset the expected statistics before setting per-testcase values */
	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
	memset(mbtd->exp_packed_stats.packing_events, 0,
		(max_packed_reqs + 1) * sizeof(u32));

	switch (td->test_info.testcase) {
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
		/* A read following the writes stops the pack */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
		/* Flush between two packed batches of writes */
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_packed_reqs,
					is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
		/* A read resets the potential-packed counter */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
					is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
		/* A flush resets the potential-packed counter */
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
					is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
					is_err_expected, is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
					is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
		mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		break;
	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
					is_err_expected, is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
					is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	default:
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
	}
	mbtd->num_requests = num_requests;

exit:
	return ret;
}
1148
1149/*
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001150 * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
1151 * In this testcase we have mixed error expectations from different
1152 * write requests, hence the special prepare function.
1153 */
1154static int prepare_partial_followed_by_abort(struct test_data *td,
1155 int num_requests)
1156{
1157 int i, start_address;
1158 int is_err_expected = 0;
1159 int ret = 0;
1160 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1161 int max_packed_reqs;
1162
1163 if (!mq) {
1164 test_pr_err("%s: NULL mq", __func__);
1165 return -EINVAL;
1166 }
1167
1168 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1169
1170 mmc_blk_init_packed_statistics(mq->card);
1171
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001172 for (i = 1; i <= num_requests; i++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001173 if (i > (num_requests / 2))
1174 is_err_expected = 1;
1175
Lee Susmanf18263a2012-10-24 14:14:37 +02001176 start_address = td->start_sector +
1177 sizeof(int) * BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001178 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001179 start_address, (i % 5) + 1, TEST_PATTERN_5A,
1180 NULL);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001181 if (ret) {
1182 test_pr_err("%s: failed to add a write request",
1183 __func__);
1184 return ret;
1185 }
1186 }
1187
1188 memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
1189 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1190 memset(mbtd->exp_packed_stats.packing_events, 0,
1191 (max_packed_reqs + 1) * sizeof(u32));
1192 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1193 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1194
1195 mbtd->num_requests = num_requests;
1196
1197 return ret;
1198}
1199
/*
 * Get number of write requests for current testcase. If random test mode was
 * chosen, pseudo-randomly choose the number of requests, otherwise set to
 * two less than the packing threshold.
 *
 * Returns the request count, 0 when the request queue is unavailable, or
 * -EINVAL when mq is NULL (callers treat non-positive values as errors).
 */
static int get_num_requests(struct test_data *td)
{
	int *seed = &mbtd->random_test_seed;
	struct request_queue *req_q;
	struct mmc_queue *mq;
	int max_num_requests;
	int num_requests;
	int min_num_requests = 2;
	int is_random = mbtd->is_random;
	int max_for_double;
	int test_packed_trigger;

	req_q = test_iosched_get_req_queue();
	if (req_q)
		mq = req_q->queuedata;
	else {
		test_pr_err("%s: NULL request queue", __func__);
		return 0;
	}

	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	/* Default: two less than the card's packing limit */
	max_num_requests = mq->card->ext_csd.max_packed_writes;
	num_requests = max_num_requests - 2;
	test_packed_trigger = mq->num_wr_reqs_to_start_packing;

	/*
	 * Here max_for_double is intended for packed control testcases
	 * in which we issue many write requests. It's purpose is to prevent
	 * exceeding max number of req_queue requests.
	 */
	max_for_double = max_num_requests - 10;

	if (td->test_info.testcase ==
				TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
		/* Don't expect packing, so issue up to trigger-1 reqs */
		num_requests = test_packed_trigger - 1;

	if (is_random) {
		if (td->test_info.testcase ==
					TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
			/*
			 * Here we don't want num_requests to be less than 1
			 * as a consequence of division by 2.
			 */
			min_num_requests = 3;

		if (td->test_info.testcase ==
				TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
			/* Don't expect packing, so issue up to trigger reqs */
			max_num_requests = test_packed_trigger;

		num_requests = pseudo_random_seed(seed, min_num_requests,
						  max_num_requests - 1);
	}

	if (td->test_info.testcase ==
				TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
		num_requests -= test_packed_trigger;

	/* Cap the count so the doubled issue stays within queue limits */
	if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
		num_requests =
		num_requests > max_for_double ? max_for_double : num_requests;

	/* Packing control tests also need the trigger's worth of requests */
	if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
		num_requests += test_packed_trigger;

	if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
		num_requests = test_packed_trigger;

	return num_requests;
}
1280
Lee Susmanf18263a2012-10-24 14:14:37 +02001281static int prepare_long_test_requests(struct test_data *td)
1282{
1283
1284 int ret;
1285 int start_sec;
1286 int j;
1287 int test_direction;
1288
1289 if (td)
1290 start_sec = td->start_sector;
1291 else {
1292 test_pr_err("%s: NULL td\n", __func__);
1293 return -EINVAL;
1294 }
1295
Lee Susmana35ae6e2012-10-25 16:06:07 +02001296 if (td->test_info.testcase == TEST_LONG_SEQUENTIAL_WRITE)
1297 test_direction = WRITE;
1298 else
1299 test_direction = READ;
Lee Susmanf18263a2012-10-24 14:14:37 +02001300
Lee Susmana35ae6e2012-10-25 16:06:07 +02001301 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
Lee Susmanf18263a2012-10-24 14:14:37 +02001302 LONG_TEST_ACTUAL_NUM_REQS, td->wr_rd_next_req_id);
1303
1304 for (j = 0; j < LONG_TEST_ACTUAL_NUM_REQS; j++) {
1305
1306 ret = test_iosched_add_wr_rd_test_req(0, test_direction,
1307 start_sec,
1308 TEST_MAX_BIOS_PER_REQ,
1309 TEST_NO_PATTERN, NULL);
1310 if (ret) {
1311 test_pr_err("%s: failed to add a bio request",
1312 __func__);
1313 return ret;
1314 }
1315
1316 start_sec +=
1317 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE);
1318 }
1319
1320 return 0;
1321}
1322
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001323/*
1324 * An implementation for the prepare_test_fn pointer in the test_info
1325 * data structure. According to the testcase we add the right number of requests
1326 * and decide if an error is expected or not.
1327 */
1328static int prepare_test(struct test_data *td)
1329{
1330 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1331 int max_num_requests;
1332 int num_requests = 0;
1333 int ret = 0;
1334 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001335 int test_packed_trigger = mq->num_wr_reqs_to_start_packing;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001336
1337 if (!mq) {
1338 test_pr_err("%s: NULL mq", __func__);
1339 return -EINVAL;
1340 }
1341
1342 max_num_requests = mq->card->ext_csd.max_packed_writes;
1343
1344 if (is_random && mbtd->random_test_seed == 0) {
1345 mbtd->random_test_seed =
1346 (unsigned int)(get_jiffies_64() & 0xFFFF);
1347 test_pr_info("%s: got seed from jiffies %d",
1348 __func__, mbtd->random_test_seed);
1349 }
1350
1351 num_requests = get_num_requests(td);
1352
1353 if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
1354 mq->packed_test_fn =
1355 test_invalid_packed_cmd;
1356
1357 if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
1358 mq->err_check_fn = test_err_check;
1359
1360 switch (td->test_info.testcase) {
1361 case TEST_STOP_DUE_TO_FLUSH:
1362 case TEST_STOP_DUE_TO_READ:
1363 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
1364 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
1365 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
1366 case TEST_CMD23_PACKED_BIT_UNSET:
1367 ret = prepare_packed_requests(td, 0, num_requests, is_random);
1368 break;
1369 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1370 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1371 ret = prepare_packed_requests(td, 0, max_num_requests - 1,
1372 is_random);
1373 break;
1374 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
1375 ret = prepare_partial_followed_by_abort(td, num_requests);
1376 break;
1377 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1378 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1379 ret = prepare_packed_requests(td, 0, max_num_requests,
1380 is_random);
1381 break;
1382 case TEST_STOP_DUE_TO_THRESHOLD:
1383 ret = prepare_packed_requests(td, 0, max_num_requests + 1,
1384 is_random);
1385 break;
1386 case TEST_RET_ABORT:
1387 case TEST_RET_RETRY:
1388 case TEST_RET_CMD_ERR:
1389 case TEST_RET_DATA_ERR:
1390 case TEST_HDR_INVALID_VERSION:
1391 case TEST_HDR_WRONG_WRITE_CODE:
1392 case TEST_HDR_INVALID_RW_CODE:
1393 case TEST_HDR_DIFFERENT_ADDRESSES:
1394 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
1395 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
1396 case TEST_CMD23_MAX_PACKED_WRITES:
1397 case TEST_CMD23_ZERO_PACKED_WRITES:
1398 case TEST_CMD23_REL_WR_BIT_SET:
1399 case TEST_CMD23_BITS_16TO29_SET:
1400 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
1401 case TEST_HDR_CMD23_PACKED_BIT_SET:
1402 ret = prepare_packed_requests(td, 1, num_requests, is_random);
1403 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001404 case TEST_PACKING_EXP_N_OVER_TRIGGER:
1405 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
1406 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
1407 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
1408 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
1409 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
1410 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1411 is_random);
1412 break;
1413 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
1414 ret = prepare_packed_control_tests_requests(td, 0,
1415 max_num_requests, is_random);
1416 break;
1417 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
1418 ret = prepare_packed_control_tests_requests(td, 0,
1419 test_packed_trigger + 1,
1420 is_random);
1421 break;
1422 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
1423 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1424 is_random);
1425 break;
1426 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
1427 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
1428 ret = prepare_packed_control_tests_requests(td, 0,
1429 test_packed_trigger, is_random);
1430 break;
Lee Susmana35ae6e2012-10-25 16:06:07 +02001431 case TEST_LONG_SEQUENTIAL_WRITE:
1432 ret = prepare_long_test_requests(td);
1433 break;
Lee Susmanf18263a2012-10-24 14:14:37 +02001434 case TEST_LONG_SEQUENTIAL_READ:
1435 ret = prepare_long_test_requests(td);
1436 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001437 default:
1438 test_pr_info("%s: Invalid test case...", __func__);
Lee Susmanf18263a2012-10-24 14:14:37 +02001439 ret = -EINVAL;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001440 }
1441
1442 return ret;
1443}
1444
1445/*
1446 * An implementation for the post_test_fn in the test_info data structure.
1447 * In our case we just reset the function pointers in the mmc_queue in order for
1448 * the FS to be able to dispatch it's requests correctly after the test is
1449 * finished.
1450 */
1451static int post_test(struct test_data *td)
1452{
1453 struct mmc_queue *mq;
1454
1455 if (!td)
1456 return -EINVAL;
1457
1458 mq = td->req_q->queuedata;
1459
1460 if (!mq) {
1461 test_pr_err("%s: NULL mq", __func__);
1462 return -EINVAL;
1463 }
1464
1465 mq->packed_test_fn = NULL;
1466 mq->err_check_fn = NULL;
1467
1468 return 0;
1469}
1470
1471/*
1472 * This function checks, based on the current test's test_group, that the
1473 * packed commands capability and control are set right. In addition, we check
1474 * if the card supports the packed command feature.
1475 */
1476static int validate_packed_commands_settings(void)
1477{
1478 struct request_queue *req_q;
1479 struct mmc_queue *mq;
1480 int max_num_requests;
1481 struct mmc_host *host;
1482
1483 req_q = test_iosched_get_req_queue();
1484 if (!req_q) {
1485 test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
1486 test_iosched_set_test_result(TEST_FAILED);
1487 return -EINVAL;
1488 }
1489
1490 mq = req_q->queuedata;
1491 if (!mq) {
1492 test_pr_err("%s: NULL mq", __func__);
1493 return -EINVAL;
1494 }
1495
1496 max_num_requests = mq->card->ext_csd.max_packed_writes;
1497 host = mq->card->host;
1498
1499 if (!(host->caps2 && MMC_CAP2_PACKED_WR)) {
1500 test_pr_err("%s: Packed Write capability disabled, exit test",
1501 __func__);
1502 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1503 return -EINVAL;
1504 }
1505
1506 if (max_num_requests == 0) {
1507 test_pr_err(
1508 "%s: no write packing support, ext_csd.max_packed_writes=%d",
1509 __func__, mq->card->ext_csd.max_packed_writes);
1510 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1511 return -EINVAL;
1512 }
1513
1514 test_pr_info("%s: max number of packed requests supported is %d ",
1515 __func__, max_num_requests);
1516
1517 switch (mbtd->test_group) {
1518 case TEST_SEND_WRITE_PACKING_GROUP:
1519 case TEST_ERR_CHECK_GROUP:
1520 case TEST_SEND_INVALID_GROUP:
1521 /* disable the packing control */
1522 host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
1523 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001524 case TEST_PACKING_CONTROL_GROUP:
1525 host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
1526 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001527 default:
1528 break;
1529 }
1530
1531 return 0;
1532}
1533
/*
 * Pseudo-randomly choose a start sector and a number of bios for a request.
 *
 * @seed:             in/out PRNG state (mbtd->random_test_seed)
 * @min_start_sector: lowest sector allowed for the test area
 * @start_sector:     out - chosen start sector
 * @num_of_bios:      out - chosen bio count (1..TEST_MAX_BIOS_PER_REQ)
 *
 * Retries until the chosen span fits inside
 * [min_start_sector, min_start_sector + TEST_MAX_SECTOR_RANGE].
 * NOTE(review): the end-of-range term num_of_bios * BIO_U32_SIZE * 4 looks
 * like a byte count being compared against a sector bound - confirm units.
 */
static void pseudo_rnd_sector_and_size(unsigned int *seed,
				unsigned int min_start_sector,
				unsigned int *start_sector,
				unsigned int *num_of_bios)
{
	unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
	do {
		*start_sector = pseudo_random_seed(seed,
			1, max_sec);
		*num_of_bios = pseudo_random_seed(seed,
			1, TEST_MAX_BIOS_PER_REQ);
		/* Defensive: never allow a zero-bio request */
		if (!(*num_of_bios))
			*num_of_bios = 1;
	} while ((*start_sector < min_start_sector) ||
		(*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
}
1550
1551/* sanitize test functions */
1552static int prepare_write_discard_sanitize_read(struct test_data *td)
1553{
1554 unsigned int start_sector;
1555 unsigned int num_of_bios = 0;
1556 static unsigned int total_bios;
1557 unsigned int *num_bios_seed;
1558 int i = 0;
1559
1560 if (mbtd->random_test_seed == 0) {
1561 mbtd->random_test_seed =
1562 (unsigned int)(get_jiffies_64() & 0xFFFF);
1563 test_pr_info("%s: got seed from jiffies %d",
1564 __func__, mbtd->random_test_seed);
1565 }
1566 num_bios_seed = &mbtd->random_test_seed;
1567
1568 do {
1569 pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
1570 &start_sector, &num_of_bios);
1571
1572 /* DISCARD */
1573 total_bios += num_of_bios;
1574 test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
1575 __func__, td->unique_next_req_id, start_sector,
1576 num_of_bios);
1577 test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
1578 start_sector, BIO_TO_SECTOR(num_of_bios),
1579 NULL);
1580
1581 } while (++i < (BLKDEV_MAX_RQ-10));
1582
1583 test_pr_info("%s: total discard bios = %d", __func__, total_bios);
1584
1585 test_pr_info("%s: add sanitize req", __func__);
1586 test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);
1587
1588 return 0;
1589}
1590
Yaniv Gardie9214c82012-10-18 13:58:18 +02001591/*
1592 * Post test operations for BKOPs test
1593 * Disable the BKOPs statistics and clear the feature flags
1594 */
1595static int bkops_post_test(struct test_data *td)
1596{
1597 struct request_queue *q = td->req_q;
1598 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1599 struct mmc_card *card = mq->card;
1600
1601 mmc_card_clr_doing_bkops(mq->card);
1602 card->ext_csd.raw_bkops_status = 0;
1603
1604 spin_lock(&card->bkops_info.bkops_stats.lock);
1605 card->bkops_info.bkops_stats.enabled = false;
1606 spin_unlock(&card->bkops_info.bkops_stats.lock);
1607
1608 return 0;
1609}
1610
/*
 * Verify the BKOPs statistics gathered during the test against the
 * expectations of the current testcase.
 *
 * Returns 0 when the counters match (or when FS write requests during the
 * test force the round to be ignored), -EINVAL otherwise.
 */
static int check_bkops_result(struct test_data *td)
{
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		goto fail;

	bkops_stat = &card->bkops_info.bkops_stats;

	test_pr_info("%s: Test results: bkops:(%d,%d,%d) hpi:%d, suspend:%d",
			__func__,
			bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX],
			bkops_stat->hpi,
			bkops_stat->suspend);

	/* Each testcase expects a specific combination of counters */
	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* one level-1 run, host suspended once, no HPI issued */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 1) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
		break;
	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* one level-1 run interrupted by HPI, no suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 1))
			goto exit;
		else
			goto fail;
		break;
	case BKOPS_CANCEL_DELAYED_WORK:
		/* the delayed work was cancelled: no activity at all */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 0) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* one urgent level-2 run, no suspend, no HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_3:
		/* one urgent level-3 run, no suspend, no HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	default:
		return -EINVAL;
	}

exit:
	return 0;
fail:
	/* FS traffic during the test invalidates the counters, not a bug */
	if (td->fs_wr_reqs_during_test) {
		test_pr_info("%s: wr reqs during test, cancel the round",
			     __func__);
		test_iosched_set_ignore_round(true);
		return 0;
	}

	test_pr_info("%s: BKOPs statistics are not as expected, test failed",
		     __func__);
	return -EINVAL;
}
1693
1694static void bkops_end_io_final_fn(struct request *rq, int err)
1695{
1696 struct test_request *test_rq =
1697 (struct test_request *)rq->elv.priv[0];
1698 BUG_ON(!test_rq);
1699
1700 test_rq->req_completed = 1;
1701 test_rq->req_result = err;
1702
1703 test_pr_info("%s: request %d completed, err=%d",
1704 __func__, test_rq->req_id, err);
1705
1706 mbtd->bkops_stage = BKOPS_STAGE_4;
1707 wake_up(&mbtd->bkops_wait_q);
1708}
1709
1710static void bkops_end_io_fn(struct request *rq, int err)
1711{
1712 struct test_request *test_rq =
1713 (struct test_request *)rq->elv.priv[0];
1714 BUG_ON(!test_rq);
1715
1716 test_rq->req_completed = 1;
1717 test_rq->req_result = err;
1718
1719 test_pr_info("%s: request %d completed, err=%d",
1720 __func__, test_rq->req_id, err);
1721 mbtd->bkops_stage = BKOPS_STAGE_2;
1722 wake_up(&mbtd->bkops_wait_q);
1723
1724}
1725
1726static int prepare_bkops(struct test_data *td)
1727{
1728 int ret = 0;
1729 struct request_queue *q = td->req_q;
1730 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1731 struct mmc_card *card = mq->card;
1732 struct mmc_bkops_stats *bkops_stat;
1733
1734 if (!card)
1735 return -EINVAL;
1736
1737 bkops_stat = &card->bkops_info.bkops_stats;
1738
1739 if (!card->ext_csd.bkops_en) {
1740 test_pr_err("%s: BKOPS is not enabled by card or host)",
1741 __func__);
1742 return -ENOTSUPP;
1743 }
1744 if (mmc_card_doing_bkops(card)) {
1745 test_pr_err("%s: BKOPS in progress, try later", __func__);
1746 return -EAGAIN;
1747 }
1748
1749 mmc_blk_init_bkops_statistics(card);
1750
1751 if ((mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2) ||
1752 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2_TWO_REQS) ||
1753 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_3))
1754 mq->err_check_fn = test_err_check;
1755 mbtd->err_check_counter = 0;
1756
1757 return ret;
1758}
1759
1760static int run_bkops(struct test_data *td)
1761{
1762 int ret = 0;
1763 struct request_queue *q = td->req_q;
1764 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1765 struct mmc_card *card = mq->card;
1766 struct mmc_bkops_stats *bkops_stat;
1767
1768 if (!card)
1769 return -EINVAL;
1770
1771 bkops_stat = &card->bkops_info.bkops_stats;
1772
1773 switch (mbtd->test_info.testcase) {
1774 case BKOPS_DELAYED_WORK_LEVEL_1:
1775 bkops_stat->ignore_card_bkops_status = true;
1776 card->ext_csd.raw_bkops_status = 1;
1777 card->bkops_info.sectors_changed =
1778 card->bkops_info.min_sectors_to_queue_delayed_work + 1;
1779 mbtd->bkops_stage = BKOPS_STAGE_1;
1780
1781 __blk_run_queue(q);
1782 /* this long sleep makes sure the host starts bkops and
1783 also, gets into suspend */
1784 msleep(10000);
1785
1786 bkops_stat->ignore_card_bkops_status = false;
1787 card->ext_csd.raw_bkops_status = 0;
1788
1789 test_iosched_mark_test_completion();
1790 break;
1791
1792 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
1793 bkops_stat->ignore_card_bkops_status = true;
1794 card->ext_csd.raw_bkops_status = 1;
1795 card->bkops_info.sectors_changed =
1796 card->bkops_info.min_sectors_to_queue_delayed_work + 1;
1797 mbtd->bkops_stage = BKOPS_STAGE_1;
1798
1799 __blk_run_queue(q);
1800 msleep(card->bkops_info.delay_ms);
1801
1802 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1803 td->start_sector,
1804 TEST_REQUEST_NUM_OF_BIOS,
1805 TEST_PATTERN_5A,
1806 bkops_end_io_final_fn);
1807 if (ret) {
1808 test_pr_err("%s: failed to add a write request",
1809 __func__);
1810 ret = -EINVAL;
1811 break;
1812 }
1813
1814 td->next_req = list_entry(td->test_queue.prev,
1815 struct test_request, queuelist);
1816 __blk_run_queue(q);
1817 wait_event(mbtd->bkops_wait_q,
1818 mbtd->bkops_stage == BKOPS_STAGE_4);
1819 bkops_stat->ignore_card_bkops_status = false;
1820
1821 test_iosched_mark_test_completion();
1822 break;
1823
1824 case BKOPS_CANCEL_DELAYED_WORK:
1825 bkops_stat->ignore_card_bkops_status = true;
1826 card->ext_csd.raw_bkops_status = 1;
1827 card->bkops_info.sectors_changed =
1828 card->bkops_info.min_sectors_to_queue_delayed_work + 1;
1829 mbtd->bkops_stage = BKOPS_STAGE_1;
1830
1831 __blk_run_queue(q);
1832
1833 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1834 td->start_sector,
1835 TEST_REQUEST_NUM_OF_BIOS,
1836 TEST_PATTERN_5A,
1837 bkops_end_io_final_fn);
1838 if (ret) {
1839 test_pr_err("%s: failed to add a write request",
1840 __func__);
1841 ret = -EINVAL;
1842 break;
1843 }
1844
1845 td->next_req = list_entry(td->test_queue.prev,
1846 struct test_request, queuelist);
1847 __blk_run_queue(q);
1848 wait_event(mbtd->bkops_wait_q,
1849 mbtd->bkops_stage == BKOPS_STAGE_4);
1850 bkops_stat->ignore_card_bkops_status = false;
1851
1852 test_iosched_mark_test_completion();
1853 break;
1854
1855 case BKOPS_URGENT_LEVEL_2:
1856 case BKOPS_URGENT_LEVEL_3:
1857 bkops_stat->ignore_card_bkops_status = true;
1858 if (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2)
1859 card->ext_csd.raw_bkops_status = 2;
1860 else
1861 card->ext_csd.raw_bkops_status = 3;
1862 mbtd->bkops_stage = BKOPS_STAGE_1;
1863
1864 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1865 td->start_sector,
1866 TEST_REQUEST_NUM_OF_BIOS,
1867 TEST_PATTERN_5A,
1868 bkops_end_io_fn);
1869 if (ret) {
1870 test_pr_err("%s: failed to add a write request",
1871 __func__);
1872 ret = -EINVAL;
1873 break;
1874 }
1875
1876 td->next_req = list_entry(td->test_queue.prev,
1877 struct test_request, queuelist);
1878 __blk_run_queue(q);
1879 wait_event(mbtd->bkops_wait_q,
1880 mbtd->bkops_stage == BKOPS_STAGE_2);
1881 card->ext_csd.raw_bkops_status = 0;
1882
1883 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1884 td->start_sector,
1885 TEST_REQUEST_NUM_OF_BIOS,
1886 TEST_PATTERN_5A,
1887 bkops_end_io_final_fn);
1888 if (ret) {
1889 test_pr_err("%s: failed to add a write request",
1890 __func__);
1891 ret = -EINVAL;
1892 break;
1893 }
1894
1895 td->next_req = list_entry(td->test_queue.prev,
1896 struct test_request, queuelist);
1897 __blk_run_queue(q);
1898
1899 wait_event(mbtd->bkops_wait_q,
1900 mbtd->bkops_stage == BKOPS_STAGE_4);
1901
1902 bkops_stat->ignore_card_bkops_status = false;
1903 test_iosched_mark_test_completion();
1904 break;
1905
1906 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
1907 mq->wr_packing_enabled = false;
1908 bkops_stat->ignore_card_bkops_status = true;
1909 card->ext_csd.raw_bkops_status = 2;
1910 mbtd->bkops_stage = BKOPS_STAGE_1;
1911
1912 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1913 td->start_sector,
1914 TEST_REQUEST_NUM_OF_BIOS,
1915 TEST_PATTERN_5A,
1916 NULL);
1917 if (ret) {
1918 test_pr_err("%s: failed to add a write request",
1919 __func__);
1920 ret = -EINVAL;
1921 break;
1922 }
1923
1924 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1925 td->start_sector,
1926 TEST_REQUEST_NUM_OF_BIOS,
1927 TEST_PATTERN_5A,
1928 bkops_end_io_fn);
1929 if (ret) {
1930 test_pr_err("%s: failed to add a write request",
1931 __func__);
1932 ret = -EINVAL;
1933 break;
1934 }
1935
1936 td->next_req = list_entry(td->test_queue.next,
1937 struct test_request, queuelist);
1938 __blk_run_queue(q);
1939 wait_event(mbtd->bkops_wait_q,
1940 mbtd->bkops_stage == BKOPS_STAGE_2);
1941 card->ext_csd.raw_bkops_status = 0;
1942
1943 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1944 td->start_sector,
1945 TEST_REQUEST_NUM_OF_BIOS,
1946 TEST_PATTERN_5A,
1947 bkops_end_io_final_fn);
1948 if (ret) {
1949 test_pr_err("%s: failed to add a write request",
1950 __func__);
1951 ret = -EINVAL;
1952 break;
1953 }
1954
1955 td->next_req = list_entry(td->test_queue.prev,
1956 struct test_request, queuelist);
1957 __blk_run_queue(q);
1958
1959 wait_event(mbtd->bkops_wait_q,
1960 mbtd->bkops_stage == BKOPS_STAGE_4);
1961
1962 bkops_stat->ignore_card_bkops_status = false;
1963 test_iosched_mark_test_completion();
1964
1965 break;
1966 default:
1967 test_pr_err("%s: wrong testcase: %d", __func__,
1968 mbtd->test_info.testcase);
1969 ret = -EINVAL;
1970 }
1971 return ret;
1972}
1973
/*
 * Set on every open() of a test debugfs node; the *_read() handlers clear it
 * after emitting their help text once, so a subsequent read() returns EOF.
 */
static bool message_repeat;
static int test_open(struct inode *inode, struct file *file)
{
	/* Stash the node's private data and re-arm the one-shot help text. */
	file->private_data = inode->i_private;
	message_repeat = 1;
	return 0;
}
1981
1982/* send_packing TEST */
1983static ssize_t send_write_packing_test_write(struct file *file,
1984 const char __user *buf,
1985 size_t count,
1986 loff_t *ppos)
1987{
1988 int ret = 0;
1989 int i = 0;
1990 int number = -1;
1991 int j = 0;
1992
1993 test_pr_info("%s: -- send_write_packing TEST --", __func__);
1994
1995 sscanf(buf, "%d", &number);
1996
1997 if (number <= 0)
1998 number = 1;
1999
2000
2001 mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
2002
2003 if (validate_packed_commands_settings())
2004 return count;
2005
2006 if (mbtd->random_test_seed > 0)
2007 test_pr_info("%s: Test seed: %d", __func__,
2008 mbtd->random_test_seed);
2009
2010 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2011
2012 mbtd->test_info.data = mbtd;
2013 mbtd->test_info.prepare_test_fn = prepare_test;
2014 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2015 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2016 mbtd->test_info.post_test_fn = post_test;
2017
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002018 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002019 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2020 test_pr_info("%s: ====================", __func__);
2021
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002022 for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
2023 j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002024
2025 mbtd->test_info.testcase = j;
2026 mbtd->is_random = RANDOM_TEST;
2027 ret = test_iosched_start_test(&mbtd->test_info);
2028 if (ret)
2029 break;
2030 /* Allow FS requests to be dispatched */
2031 msleep(1000);
2032 mbtd->test_info.testcase = j;
2033 mbtd->is_random = NON_RANDOM_TEST;
2034 ret = test_iosched_start_test(&mbtd->test_info);
2035 if (ret)
2036 break;
2037 /* Allow FS requests to be dispatched */
2038 msleep(1000);
2039 }
2040 }
2041
2042 test_pr_info("%s: Completed all the test cases.", __func__);
2043
2044 return count;
2045}
2046
2047static ssize_t send_write_packing_test_read(struct file *file,
2048 char __user *buffer,
2049 size_t count,
2050 loff_t *offset)
2051{
2052 memset((void *)buffer, 0, count);
2053
2054 snprintf(buffer, count,
2055 "\nsend_write_packing_test\n"
2056 "=========\n"
2057 "Description:\n"
2058 "This test checks the following scenarios\n"
2059 "- Pack due to FLUSH message\n"
2060 "- Pack due to FLUSH after threshold writes\n"
2061 "- Pack due to READ message\n"
2062 "- Pack due to READ after threshold writes\n"
2063 "- Pack due to empty queue\n"
2064 "- Pack due to threshold writes\n"
2065 "- Pack due to one over threshold writes\n");
2066
2067 if (message_repeat == 1) {
2068 message_repeat = 0;
2069 return strnlen(buffer, count);
2070 } else {
2071 return 0;
2072 }
2073}
2074
/* debugfs fops: test_open re-arms the one-shot help text in *_read(). */
const struct file_operations send_write_packing_test_ops = {
	.open = test_open,
	.write = send_write_packing_test_write,
	.read = send_write_packing_test_read,
};
2080
2081/* err_check TEST */
2082static ssize_t err_check_test_write(struct file *file,
2083 const char __user *buf,
2084 size_t count,
2085 loff_t *ppos)
2086{
2087 int ret = 0;
2088 int i = 0;
2089 int number = -1;
2090 int j = 0;
2091
2092 test_pr_info("%s: -- err_check TEST --", __func__);
2093
2094 sscanf(buf, "%d", &number);
2095
2096 if (number <= 0)
2097 number = 1;
2098
2099 mbtd->test_group = TEST_ERR_CHECK_GROUP;
2100
2101 if (validate_packed_commands_settings())
2102 return count;
2103
2104 if (mbtd->random_test_seed > 0)
2105 test_pr_info("%s: Test seed: %d", __func__,
2106 mbtd->random_test_seed);
2107
2108 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2109
2110 mbtd->test_info.data = mbtd;
2111 mbtd->test_info.prepare_test_fn = prepare_test;
2112 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2113 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2114 mbtd->test_info.post_test_fn = post_test;
2115
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002116 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002117 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2118 test_pr_info("%s: ====================", __func__);
2119
2120 for (j = ERR_CHECK_MIN_TESTCASE;
2121 j <= ERR_CHECK_MAX_TESTCASE ; j++) {
2122 mbtd->test_info.testcase = j;
2123 mbtd->is_random = RANDOM_TEST;
2124 ret = test_iosched_start_test(&mbtd->test_info);
2125 if (ret)
2126 break;
2127 /* Allow FS requests to be dispatched */
2128 msleep(1000);
2129 mbtd->test_info.testcase = j;
2130 mbtd->is_random = NON_RANDOM_TEST;
2131 ret = test_iosched_start_test(&mbtd->test_info);
2132 if (ret)
2133 break;
2134 /* Allow FS requests to be dispatched */
2135 msleep(1000);
2136 }
2137 }
2138
2139 test_pr_info("%s: Completed all the test cases.", __func__);
2140
2141 return count;
2142}
2143
2144static ssize_t err_check_test_read(struct file *file,
2145 char __user *buffer,
2146 size_t count,
2147 loff_t *offset)
2148{
2149 memset((void *)buffer, 0, count);
2150
2151 snprintf(buffer, count,
2152 "\nerr_check_TEST\n"
2153 "=========\n"
2154 "Description:\n"
2155 "This test checks the following scenarios\n"
2156 "- Return ABORT\n"
2157 "- Return PARTIAL followed by success\n"
2158 "- Return PARTIAL followed by abort\n"
2159 "- Return PARTIAL multiple times until success\n"
2160 "- Return PARTIAL with fail index = threshold\n"
2161 "- Return RETRY\n"
2162 "- Return CMD_ERR\n"
2163 "- Return DATA_ERR\n");
2164
2165 if (message_repeat == 1) {
2166 message_repeat = 0;
2167 return strnlen(buffer, count);
2168 } else {
2169 return 0;
2170 }
2171}
2172
/* debugfs fops: test_open re-arms the one-shot help text in *_read(). */
const struct file_operations err_check_test_ops = {
	.open = test_open,
	.write = err_check_test_write,
	.read = err_check_test_read,
};
2178
2179/* send_invalid_packed TEST */
2180static ssize_t send_invalid_packed_test_write(struct file *file,
2181 const char __user *buf,
2182 size_t count,
2183 loff_t *ppos)
2184{
2185 int ret = 0;
2186 int i = 0;
2187 int number = -1;
2188 int j = 0;
2189 int num_of_failures = 0;
2190
2191 test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
2192
2193 sscanf(buf, "%d", &number);
2194
2195 if (number <= 0)
2196 number = 1;
2197
2198 mbtd->test_group = TEST_SEND_INVALID_GROUP;
2199
2200 if (validate_packed_commands_settings())
2201 return count;
2202
2203 if (mbtd->random_test_seed > 0)
2204 test_pr_info("%s: Test seed: %d", __func__,
2205 mbtd->random_test_seed);
2206
2207 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2208
2209 mbtd->test_info.data = mbtd;
2210 mbtd->test_info.prepare_test_fn = prepare_test;
2211 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2212 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2213 mbtd->test_info.post_test_fn = post_test;
2214
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002215 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002216 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2217 test_pr_info("%s: ====================", __func__);
2218
2219 for (j = INVALID_CMD_MIN_TESTCASE;
2220 j <= INVALID_CMD_MAX_TESTCASE ; j++) {
2221
2222 mbtd->test_info.testcase = j;
2223 mbtd->is_random = RANDOM_TEST;
2224 ret = test_iosched_start_test(&mbtd->test_info);
2225 if (ret)
2226 num_of_failures++;
2227 /* Allow FS requests to be dispatched */
2228 msleep(1000);
2229
2230 mbtd->test_info.testcase = j;
2231 mbtd->is_random = NON_RANDOM_TEST;
2232 ret = test_iosched_start_test(&mbtd->test_info);
2233 if (ret)
2234 num_of_failures++;
2235 /* Allow FS requests to be dispatched */
2236 msleep(1000);
2237 }
2238 }
2239
2240 test_pr_info("%s: Completed all the test cases.", __func__);
2241
2242 if (num_of_failures > 0) {
2243 test_iosched_set_test_result(TEST_FAILED);
2244 test_pr_err(
2245 "There were %d failures during the test, TEST FAILED",
2246 num_of_failures);
2247 }
2248 return count;
2249}
2250
2251static ssize_t send_invalid_packed_test_read(struct file *file,
2252 char __user *buffer,
2253 size_t count,
2254 loff_t *offset)
2255{
2256 memset((void *)buffer, 0, count);
2257
2258 snprintf(buffer, count,
2259 "\nsend_invalid_packed_TEST\n"
2260 "=========\n"
2261 "Description:\n"
2262 "This test checks the following scenarios\n"
2263 "- Send an invalid header version\n"
2264 "- Send the wrong write code\n"
2265 "- Send an invalid R/W code\n"
2266 "- Send wrong start address in header\n"
2267 "- Send header with block_count smaller than actual\n"
2268 "- Send header with block_count larger than actual\n"
2269 "- Send header CMD23 packed bit set\n"
2270 "- Send CMD23 with block count over threshold\n"
2271 "- Send CMD23 with block_count equals zero\n"
2272 "- Send CMD23 packed bit unset\n"
2273 "- Send CMD23 reliable write bit set\n"
2274 "- Send CMD23 bits [16-29] set\n"
2275 "- Send CMD23 header block not in block_count\n");
2276
2277 if (message_repeat == 1) {
2278 message_repeat = 0;
2279 return strnlen(buffer, count);
2280 } else {
2281 return 0;
2282 }
2283}
2284
/* debugfs fops: test_open re-arms the one-shot help text in *_read(). */
const struct file_operations send_invalid_packed_test_ops = {
	.open = test_open,
	.write = send_invalid_packed_test_write,
	.read = send_invalid_packed_test_read,
};
2290
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002291/* packing_control TEST */
2292static ssize_t write_packing_control_test_write(struct file *file,
2293 const char __user *buf,
2294 size_t count,
2295 loff_t *ppos)
2296{
2297 int ret = 0;
2298 int i = 0;
2299 int number = -1;
2300 int j = 0;
2301 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
2302 int max_num_requests = mq->card->ext_csd.max_packed_writes;
2303 int test_successful = 1;
2304
2305 test_pr_info("%s: -- write_packing_control TEST --", __func__);
2306
2307 sscanf(buf, "%d", &number);
2308
2309 if (number <= 0)
2310 number = 1;
2311
2312 test_pr_info("%s: max_num_requests = %d ", __func__,
2313 max_num_requests);
2314
2315 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2316 mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
2317
2318 if (validate_packed_commands_settings())
2319 return count;
2320
2321 mbtd->test_info.data = mbtd;
2322 mbtd->test_info.prepare_test_fn = prepare_test;
2323 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2324 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2325
2326 for (i = 0; i < number; ++i) {
2327 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2328 test_pr_info("%s: ====================", __func__);
2329
2330 for (j = PACKING_CONTROL_MIN_TESTCASE;
2331 j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
2332
2333 test_successful = 1;
2334 mbtd->test_info.testcase = j;
2335 mbtd->is_random = RANDOM_TEST;
2336 ret = test_iosched_start_test(&mbtd->test_info);
2337 if (ret) {
2338 test_successful = 0;
2339 break;
2340 }
2341 /* Allow FS requests to be dispatched */
2342 msleep(1000);
2343
2344 mbtd->test_info.testcase = j;
2345 mbtd->is_random = NON_RANDOM_TEST;
2346 ret = test_iosched_start_test(&mbtd->test_info);
2347 if (ret) {
2348 test_successful = 0;
2349 break;
2350 }
2351 /* Allow FS requests to be dispatched */
2352 msleep(1000);
2353 }
2354
2355 if (!test_successful)
2356 break;
2357 }
2358
2359 test_pr_info("%s: Completed all the test cases.", __func__);
2360
2361 return count;
2362}
2363
2364static ssize_t write_packing_control_test_read(struct file *file,
2365 char __user *buffer,
2366 size_t count,
2367 loff_t *offset)
2368{
2369 memset((void *)buffer, 0, count);
2370
2371 snprintf(buffer, count,
2372 "\nwrite_packing_control_test\n"
2373 "=========\n"
2374 "Description:\n"
2375 "This test checks the following scenarios\n"
2376 "- Packing expected - one over trigger\n"
2377 "- Packing expected - N over trigger\n"
2378 "- Packing expected - N over trigger followed by read\n"
2379 "- Packing expected - N over trigger followed by flush\n"
2380 "- Packing expected - threshold over trigger FB by flush\n"
2381 "- Packing not expected - less than trigger\n"
2382 "- Packing not expected - trigger requests\n"
2383 "- Packing not expected - trigger, read, trigger\n"
2384 "- Mixed state - packing -> no packing -> packing\n"
2385 "- Mixed state - no packing -> packing -> no packing\n");
2386
2387 if (message_repeat == 1) {
2388 message_repeat = 0;
2389 return strnlen(buffer, count);
2390 } else {
2391 return 0;
2392 }
2393}
2394
/* debugfs fops: test_open re-arms the one-shot help text in *_read(). */
const struct file_operations write_packing_control_test_ops = {
	.open = test_open,
	.write = write_packing_control_test_write,
	.read = write_packing_control_test_read,
};
2400
Maya Erezddc55732012-10-17 09:51:01 +02002401static ssize_t write_discard_sanitize_test_write(struct file *file,
2402 const char __user *buf,
2403 size_t count,
2404 loff_t *ppos)
2405{
2406 int ret = 0;
2407 int i = 0;
2408 int number = -1;
2409
2410 sscanf(buf, "%d", &number);
2411 if (number <= 0)
2412 number = 1;
2413
2414 test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);
2415
2416 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2417
2418 mbtd->test_group = TEST_GENERAL_GROUP;
2419
2420 mbtd->test_info.data = mbtd;
2421 mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
2422 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2423 mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;
2424
2425 for (i = 0 ; i < number ; ++i) {
2426 test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
2427 test_pr_info("%s: ===================", __func__);
2428
2429 mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
2430 ret = test_iosched_start_test(&mbtd->test_info);
2431
2432 if (ret)
2433 break;
2434 }
2435
2436 return count;
2437}
2438
/* debugfs fops: write-only node (no help text reader is registered). */
const struct file_operations write_discard_sanitize_test_ops = {
	.open = test_open,
	.write = write_discard_sanitize_test_write,
};
2443
Yaniv Gardie9214c82012-10-18 13:58:18 +02002444static ssize_t bkops_test_write(struct file *file,
2445 const char __user *buf,
2446 size_t count,
2447 loff_t *ppos)
2448{
2449 int ret = 0;
2450 int i = 0, j;
2451 int number = -1;
2452
2453 test_pr_info("%s: -- bkops_test TEST --", __func__);
2454
2455 sscanf(buf, "%d", &number);
2456
2457 if (number <= 0)
2458 number = 1;
2459
2460 mbtd->test_group = TEST_BKOPS_GROUP;
2461
2462 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2463
2464 mbtd->test_info.data = mbtd;
2465 mbtd->test_info.prepare_test_fn = prepare_bkops;
2466 mbtd->test_info.check_test_result_fn = check_bkops_result;
2467 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2468 mbtd->test_info.run_test_fn = run_bkops;
2469 mbtd->test_info.timeout_msec = BKOPS_TEST_TIMEOUT;
2470 mbtd->test_info.post_test_fn = bkops_post_test;
2471
2472 for (i = 0 ; i < number ; ++i) {
2473 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2474 test_pr_info("%s: ===================", __func__);
2475 for (j = BKOPS_MIN_TESTCASE ;
2476 j <= BKOPS_MAX_TESTCASE ; j++) {
2477 mbtd->test_info.testcase = j;
2478 ret = test_iosched_start_test(&mbtd->test_info);
2479 if (ret)
2480 break;
2481 }
2482 }
2483
2484 test_pr_info("%s: Completed all the test cases.", __func__);
2485
2486 return count;
2487}
2488
2489static ssize_t bkops_test_read(struct file *file,
2490 char __user *buffer,
2491 size_t count,
2492 loff_t *offset)
2493{
2494 memset((void *)buffer, 0, count);
2495
2496 snprintf(buffer, count,
2497 "\nbkops_test\n========================\n"
2498 "Description:\n"
2499 "This test simulates BKOPS status from card\n"
2500 "and verifies that:\n"
2501 " - Starting BKOPS delayed work, level 1\n"
2502 " - Starting BKOPS delayed work, level 1, with HPI\n"
2503 " - Cancel starting BKOPS delayed work, "
2504 " when a request is received\n"
2505 " - Starting BKOPS urgent, level 2,3\n"
2506 " - Starting BKOPS urgent with 2 requests\n");
2507 return strnlen(buffer, count);
2508}
2509
/* debugfs fops: test_open arms message_repeat for the help text. */
const struct file_operations bkops_test_ops = {
	.open = test_open,
	.write = bkops_test_write,
	.read = bkops_test_read,
};
2515
Lee Susmanf18263a2012-10-24 14:14:37 +02002516static ssize_t long_sequential_read_test_write(struct file *file,
2517 const char __user *buf,
2518 size_t count,
2519 loff_t *ppos)
2520{
2521 int ret = 0;
2522 int i = 0;
2523 int number = -1;
2524 unsigned int mtime, integer, fraction;
2525
2526 test_pr_info("%s: -- Long Sequential Read TEST --", __func__);
2527
2528 sscanf(buf, "%d", &number);
2529
2530 if (number <= 0)
2531 number = 1;
2532
2533 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2534 mbtd->test_group = TEST_GENERAL_GROUP;
2535
2536 mbtd->test_info.data = mbtd;
2537 mbtd->test_info.prepare_test_fn = prepare_test;
2538 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2539
2540 for (i = 0 ; i < number ; ++i) {
2541 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2542 test_pr_info("%s: ====================", __func__);
2543
2544 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_READ;
2545 mbtd->is_random = NON_RANDOM_TEST;
2546 ret = test_iosched_start_test(&mbtd->test_info);
2547 if (ret)
2548 break;
2549
2550 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2551
2552 test_pr_info("%s: time is %u msec, size is %u.%u MiB",
2553 __func__, mtime, LONG_TEST_SIZE_INTEGER,
2554 LONG_TEST_SIZE_FRACTION);
2555
2556 /* we first multiply in order not to lose precision */
2557 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2558 /* divide values to get a MiB/sec integer value with one
2559 digit of precision. Multiply by 10 for one digit precision
2560 */
2561 fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
2562 integer /= 10;
2563 /* and calculate the MiB value fraction */
2564 fraction -= integer * 10;
2565
2566 test_pr_info("%s: Throughput: %u.%u MiB/sec\n"
2567 , __func__, integer, fraction);
2568
2569 /* Allow FS requests to be dispatched */
2570 msleep(1000);
2571 }
2572
2573 return count;
2574}
2575
2576static ssize_t long_sequential_read_test_read(struct file *file,
2577 char __user *buffer,
2578 size_t count,
2579 loff_t *offset)
2580{
2581 memset((void *)buffer, 0, count);
2582
2583 snprintf(buffer, count,
2584 "\nlong_sequential_read_test\n"
2585 "=========\n"
2586 "Description:\n"
2587 "This test runs the following scenarios\n"
2588 "- Long Sequential Read Test: this test measures read "
2589 "throughput at the driver level by sequentially reading many "
2590 "large requests.\n");
2591
2592 if (message_repeat == 1) {
2593 message_repeat = 0;
2594 return strnlen(buffer, count);
2595 } else
2596 return 0;
2597}
2598
/* debugfs fops: test_open re-arms the one-shot help text in *_read(). */
const struct file_operations long_sequential_read_test_ops = {
	.open = test_open,
	.write = long_sequential_read_test_write,
	.read = long_sequential_read_test_read,
};
2604
Lee Susmana35ae6e2012-10-25 16:06:07 +02002605static ssize_t long_sequential_write_test_write(struct file *file,
2606 const char __user *buf,
2607 size_t count,
2608 loff_t *ppos)
2609{
2610 int ret = 0;
2611 int i = 0;
2612 int number = -1;
2613 unsigned int mtime, integer, fraction;
2614
2615 test_pr_info("%s: -- Long Sequential Write TEST --", __func__);
2616
2617 sscanf(buf, "%d", &number);
2618
2619 if (number <= 0)
2620 number = 1;
2621
2622 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2623 mbtd->test_group = TEST_GENERAL_GROUP;
2624
2625 mbtd->test_info.data = mbtd;
2626 mbtd->test_info.prepare_test_fn = prepare_test;
2627 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2628
2629 for (i = 0 ; i < number ; ++i) {
2630 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2631 test_pr_info("%s: ====================", __func__);
2632
2633 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_WRITE;
2634 mbtd->is_random = NON_RANDOM_TEST;
2635 ret = test_iosched_start_test(&mbtd->test_info);
2636 if (ret)
2637 break;
2638
2639 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2640
2641 test_pr_info("%s: time is %u msec, size is %u.%u MiB",
2642 __func__, mtime, LONG_TEST_SIZE_INTEGER,
2643 LONG_TEST_SIZE_FRACTION);
2644
2645 /* we first multiply in order not to lose precision */
2646 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2647 /* divide values to get a MiB/sec integer value with one
2648 digit of precision
2649 */
2650 fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
2651 integer /= 10;
2652 /* and calculate the MiB value fraction */
2653 fraction -= integer * 10;
2654
2655 test_pr_info("%s: Throughput: %u.%u MiB/sec\n",
2656 __func__, integer, fraction);
2657
2658 /* Allow FS requests to be dispatched */
2659 msleep(1000);
2660 }
2661
2662 return count;
2663}
2664
2665static ssize_t long_sequential_write_test_read(struct file *file,
2666 char __user *buffer,
2667 size_t count,
2668 loff_t *offset)
2669{
2670 memset((void *)buffer, 0, count);
2671
2672 snprintf(buffer, count,
2673 "\nlong_sequential_write_test\n"
2674 "=========\n"
2675 "Description:\n"
2676 "This test runs the following scenarios\n"
2677 "- Long Sequential Write Test: this test measures write "
2678 "throughput at the driver level by sequentially writing many "
2679 "large requests\n");
2680
2681 if (message_repeat == 1) {
2682 message_repeat = 0;
2683 return strnlen(buffer, count);
2684 } else
2685 return 0;
2686}
2687
/* debugfs fops: test_open re-arms the one-shot help text in *_read(). */
const struct file_operations long_sequential_write_test_ops = {
	.open = test_open,
	.write = long_sequential_write_test_write,
	.read = long_sequential_write_test_read,
};
2693
2694
/*
 * Remove every debugfs node created by mmc_block_test_debugfs_init().
 * debugfs_remove(NULL) is a no-op, so this is also safe from the
 * partial-failure (err_nomem) path of the init routine.
 */
static void mmc_block_test_debugfs_cleanup(void)
{
	debugfs_remove(mbtd->debug.random_test_seed);
	debugfs_remove(mbtd->debug.send_write_packing_test);
	debugfs_remove(mbtd->debug.err_check_test);
	debugfs_remove(mbtd->debug.send_invalid_packed_test);
	debugfs_remove(mbtd->debug.packing_control_test);
	debugfs_remove(mbtd->debug.discard_sanitize_test);
	debugfs_remove(mbtd->debug.bkops_test);
	debugfs_remove(mbtd->debug.long_sequential_read_test);
	debugfs_remove(mbtd->debug.long_sequential_write_test);
}
2707
2708static int mmc_block_test_debugfs_init(void)
2709{
2710 struct dentry *utils_root, *tests_root;
2711
2712 utils_root = test_iosched_get_debugfs_utils_root();
2713 tests_root = test_iosched_get_debugfs_tests_root();
2714
2715 if (!utils_root || !tests_root)
2716 return -EINVAL;
2717
2718 mbtd->debug.random_test_seed = debugfs_create_u32(
2719 "random_test_seed",
2720 S_IRUGO | S_IWUGO,
2721 utils_root,
2722 &mbtd->random_test_seed);
2723
2724 if (!mbtd->debug.random_test_seed)
2725 goto err_nomem;
2726
2727 mbtd->debug.send_write_packing_test =
2728 debugfs_create_file("send_write_packing_test",
2729 S_IRUGO | S_IWUGO,
2730 tests_root,
2731 NULL,
2732 &send_write_packing_test_ops);
2733
2734 if (!mbtd->debug.send_write_packing_test)
2735 goto err_nomem;
2736
2737 mbtd->debug.err_check_test =
2738 debugfs_create_file("err_check_test",
2739 S_IRUGO | S_IWUGO,
2740 tests_root,
2741 NULL,
2742 &err_check_test_ops);
2743
2744 if (!mbtd->debug.err_check_test)
2745 goto err_nomem;
2746
2747 mbtd->debug.send_invalid_packed_test =
2748 debugfs_create_file("send_invalid_packed_test",
2749 S_IRUGO | S_IWUGO,
2750 tests_root,
2751 NULL,
2752 &send_invalid_packed_test_ops);
2753
2754 if (!mbtd->debug.send_invalid_packed_test)
2755 goto err_nomem;
2756
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002757 mbtd->debug.packing_control_test = debugfs_create_file(
2758 "packing_control_test",
2759 S_IRUGO | S_IWUGO,
2760 tests_root,
2761 NULL,
2762 &write_packing_control_test_ops);
2763
2764 if (!mbtd->debug.packing_control_test)
2765 goto err_nomem;
2766
Maya Erezddc55732012-10-17 09:51:01 +02002767 mbtd->debug.discard_sanitize_test =
2768 debugfs_create_file("write_discard_sanitize_test",
2769 S_IRUGO | S_IWUGO,
2770 tests_root,
2771 NULL,
2772 &write_discard_sanitize_test_ops);
2773 if (!mbtd->debug.discard_sanitize_test) {
2774 mmc_block_test_debugfs_cleanup();
2775 return -ENOMEM;
2776 }
2777
Yaniv Gardie9214c82012-10-18 13:58:18 +02002778 mbtd->debug.bkops_test =
2779 debugfs_create_file("bkops_test",
2780 S_IRUGO | S_IWUGO,
2781 tests_root,
2782 NULL,
2783 &bkops_test_ops);
2784
2785 if (!mbtd->debug.bkops_test)
2786 goto err_nomem;
2787
Lee Susmanf18263a2012-10-24 14:14:37 +02002788 mbtd->debug.long_sequential_read_test = debugfs_create_file(
2789 "long_sequential_read_test",
2790 S_IRUGO | S_IWUGO,
2791 tests_root,
2792 NULL,
2793 &long_sequential_read_test_ops);
2794
2795 if (!mbtd->debug.long_sequential_read_test)
2796 goto err_nomem;
2797
Lee Susmana35ae6e2012-10-25 16:06:07 +02002798 mbtd->debug.long_sequential_write_test = debugfs_create_file(
2799 "long_sequential_write_test",
2800 S_IRUGO | S_IWUGO,
2801 tests_root,
2802 NULL,
2803 &long_sequential_write_test_ops);
2804
2805 if (!mbtd->debug.long_sequential_write_test)
2806 goto err_nomem;
2807
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002808 return 0;
2809
2810err_nomem:
2811 mmc_block_test_debugfs_cleanup();
2812 return -ENOMEM;
2813}
2814
2815static void mmc_block_test_probe(void)
2816{
2817 struct request_queue *q = test_iosched_get_req_queue();
2818 struct mmc_queue *mq;
2819 int max_packed_reqs;
2820
2821 if (!q) {
2822 test_pr_err("%s: NULL request queue", __func__);
2823 return;
2824 }
2825
2826 mq = q->queuedata;
2827 if (!mq) {
2828 test_pr_err("%s: NULL mq", __func__);
2829 return;
2830 }
2831
2832 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
2833 mbtd->exp_packed_stats.packing_events =
2834 kzalloc((max_packed_reqs + 1) *
2835 sizeof(*mbtd->exp_packed_stats.packing_events),
2836 GFP_KERNEL);
2837
2838 mmc_block_test_debugfs_init();
2839}
2840
/* test-iosched exit hook: tear down this module's debugfs nodes. */
static void mmc_block_test_remove(void)
{
	mmc_block_test_debugfs_cleanup();
}
2845
2846static int __init mmc_block_test_init(void)
2847{
2848 mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
2849 if (!mbtd) {
2850 test_pr_err("%s: failed to allocate mmc_block_test_data",
2851 __func__);
2852 return -ENODEV;
2853 }
2854
Yaniv Gardie9214c82012-10-18 13:58:18 +02002855 init_waitqueue_head(&mbtd->bkops_wait_q);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002856 mbtd->bdt.init_fn = mmc_block_test_probe;
2857 mbtd->bdt.exit_fn = mmc_block_test_remove;
2858 INIT_LIST_HEAD(&mbtd->bdt.list);
2859 test_iosched_register(&mbtd->bdt);
2860
2861 return 0;
2862}
2863
/* Module exit: unregister from test-iosched and free the global context. */
static void __exit mmc_block_test_exit(void)
{
	test_iosched_unregister(&mbtd->bdt);
	kfree(mbtd);
}
2869
module_init(mmc_block_test_init);
module_exit(mmc_block_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MMC block test");