blob: 5f5a178b87b2ca824d2291789f207d98f86fc4c5 [file] [log] [blame]
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/* MMC block test */
15
16#include <linux/module.h>
17#include <linux/blkdev.h>
18#include <linux/debugfs.h>
19#include <linux/mmc/card.h>
20#include <linux/mmc/host.h>
21#include <linux/delay.h>
22#include <linux/test-iosched.h>
Lee Susmanf18263a2012-10-24 14:14:37 +020023#include <linux/jiffies.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020024#include "queue.h"
Yaniv Gardie9214c82012-10-18 13:58:18 +020025#include <linux/mmc/mmc.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020026
27#define MODULE_NAME "mmc_block_test"
28#define TEST_MAX_SECTOR_RANGE (600*1024*1024) /* 600 MB */
29#define TEST_MAX_BIOS_PER_REQ 120
30#define CMD23_PACKED_BIT (1 << 30)
31#define LARGE_PRIME_1 1103515367
32#define LARGE_PRIME_2 35757
33#define PACKED_HDR_VER_MASK 0x000000FF
34#define PACKED_HDR_RW_MASK 0x0000FF00
35#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
36#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
Maya Erezddc55732012-10-17 09:51:01 +020037#define SECTOR_SIZE 512
38#define NUM_OF_SECTORS_PER_BIO ((BIO_U32_SIZE * 4) / SECTOR_SIZE)
39#define BIO_TO_SECTOR(x) (x * NUM_OF_SECTORS_PER_BIO)
Lee Susmanf18263a2012-10-24 14:14:37 +020040/* the desired long test size to be written or read */
41#define LONG_TEST_MAX_NUM_BYTES (50*1024*1024) /* 50MB */
42/* request queue limitation is 128 requests, and we leave 10 spare requests */
43#define TEST_MAX_REQUESTS 118
44#define LONG_TEST_MAX_NUM_REQS (LONG_TEST_MAX_NUM_BYTES / \
45 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
46/* this doesn't allow the test requests num to be greater than the maximum */
47#define LONG_TEST_ACTUAL_NUM_REQS \
48 ((TEST_MAX_REQUESTS < LONG_TEST_MAX_NUM_REQS) ? \
49 TEST_MAX_REQUESTS : LONG_TEST_MAX_NUM_REQS)
50#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
51/* actual number of bytes in test */
52#define LONG_TEST_ACTUAL_BYTE_NUM (LONG_TEST_ACTUAL_NUM_REQS * \
53 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
54/* actual number of MiB in test multiplied by 10, for single digit precision*/
55#define LONG_TEST_ACTUAL_MB_NUM_X_10 ((LONG_TEST_ACTUAL_BYTE_NUM * 10) / \
56 (1024 * 1024))
57/* extract integer value */
58#define LONG_TEST_SIZE_INTEGER (LONG_TEST_ACTUAL_MB_NUM_X_10 / 10)
59/* and calculate the MiB value fraction */
60#define LONG_TEST_SIZE_FRACTION (LONG_TEST_ACTUAL_MB_NUM_X_10 - \
61 (LONG_TEST_SIZE_INTEGER * 10))
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020062
63#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
64#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
65#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
66
Maya Erezddc55732012-10-17 09:51:01 +020067#define SANITIZE_TEST_TIMEOUT 240000
Yaniv Gardie9214c82012-10-18 13:58:18 +020068#define TEST_REQUEST_NUM_OF_BIOS 3
69
70
71#define CHECK_BKOPS_STATS(stats, exp_bkops, exp_hpi, exp_suspend) \
72 ((stats.bkops != exp_bkops) || \
73 (stats.hpi != exp_hpi) || \
74 (stats.suspend != exp_suspend))
75#define BKOPS_TEST_TIMEOUT 60000
Maya Erezddc55732012-10-17 09:51:01 +020076
/* Selects whether a test chooses its request/bio counts pseudo-randomly */
enum is_random {
	NON_RANDOM_TEST,
	RANDOM_TEST,
};
81
/*
 * All testcases implemented by this module, partitioned into groups.
 * Each group is bracketed by *_MIN_TESTCASE / *_MAX_TESTCASE aliases so
 * group membership can be checked with a simple range comparison.
 */
enum mmc_block_test_testcases {
	/* Start of send write packing test group */
	SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_FLUSH,
	TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_EMPTY_QUEUE,
	TEST_STOP_DUE_TO_MAX_REQ_NUM,
	TEST_STOP_DUE_TO_THRESHOLD,
	SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,

	/* Start of err check test group */
	ERR_CHECK_MIN_TESTCASE,
	TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
	TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
	TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
	TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
	TEST_RET_PARTIAL_MAX_FAIL_IDX,
	TEST_RET_RETRY,
	TEST_RET_CMD_ERR,
	TEST_RET_DATA_ERR,
	ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,

	/* Start of send invalid test group */
	INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_WRONG_WRITE_CODE,
	TEST_HDR_INVALID_RW_CODE,
	TEST_HDR_DIFFERENT_ADDRESSES,
	TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
	TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
	TEST_HDR_CMD23_PACKED_BIT_SET,
	TEST_CMD23_MAX_PACKED_WRITES,
	TEST_CMD23_ZERO_PACKED_WRITES,
	TEST_CMD23_PACKED_BIT_UNSET,
	TEST_CMD23_REL_WR_BIT_SET,
	TEST_CMD23_BITS_16TO29_SET,
	TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
	INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,

	/*
	 * Start of packing control test group.
	 * in these next testcases the abbreviation FB = followed by
	 */
	PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
				PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_N_OVER_TRIGGER,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
	TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
	TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
	TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
	TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
	TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
	PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,

	/* Stand-alone testcase, not part of any bracketed group */
	TEST_WRITE_DISCARD_SANITIZE_READ,

	/* Start of bkops test group */
	BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1 = BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1_HPI,
	BKOPS_CANCEL_DELAYED_WORK,
	BKOPS_URGENT_LEVEL_2,
	BKOPS_URGENT_LEVEL_2_TWO_REQS,
	BKOPS_URGENT_LEVEL_3,
	BKOPS_MAX_TESTCASE = BKOPS_URGENT_LEVEL_3,

	/* Stand-alone testcase */
	TEST_LONG_SEQUENTIAL_READ,
};
156
/*
 * Test group identifiers; mmc_block_test_data.test_group holds one of
 * these while a testcase from the corresponding group is running.
 */
enum mmc_block_test_group {
	TEST_NO_GROUP,
	TEST_GENERAL_GROUP,
	TEST_SEND_WRITE_PACKING_GROUP,
	TEST_ERR_CHECK_GROUP,
	TEST_SEND_INVALID_GROUP,
	TEST_PACKING_CONTROL_GROUP,
	TEST_BKOPS_GROUP,
};
166
/*
 * Sequencing markers for the BKOPs testcases; the current stage is kept
 * in mmc_block_test_data.bkops_stage.
 * NOTE(review): the exact transition points are in BKOPs test code not
 * visible in this chunk — confirm against the rest of the file.
 */
enum bkops_test_stages {
	BKOPS_STAGE_1,
	BKOPS_STAGE_2,
	BKOPS_STAGE_3,
	BKOPS_STAGE_4,
};
173
/* debugfs dentries, one per user-triggerable test entry point */
struct mmc_block_test_debug {
	struct dentry *send_write_packing_test;
	struct dentry *err_check_test;
	struct dentry *send_invalid_packed_test;
	/* Seed written by the user to make random tests reproducible */
	struct dentry *random_test_seed;
	struct dentry *packing_control_test;
	struct dentry *discard_sanitize_test;
	struct dentry *bkops_test;
	struct dentry *long_sequential_read_test;
};
184
/* Module-wide state shared by all mmc_block_test testcases */
struct mmc_block_test_data {
	/* The number of write requests that the test will issue */
	int num_requests;
	/* The expected write packing statistics for the current test */
	struct mmc_wr_pack_stats exp_packed_stats;
	/*
	 * A user-defined seed for random choices of number of bios written in
	 * a request, and of number of requests issued in a test
	 * This field is randomly updated after each use
	 */
	unsigned int random_test_seed;
	/* A retry counter used in err_check tests */
	int err_check_counter;
	/* Can be one of the values of enum test_group */
	enum mmc_block_test_group test_group;
	/*
	 * Indicates if the current testcase is running with random values of
	 * num_requests and num_bios (in each request)
	 */
	int is_random;
	/* Data structure for debugfs dentrys */
	struct mmc_block_test_debug debug;
	/*
	 * Data structure containing individual test information, including
	 * self-defined specific data
	 */
	struct test_info test_info;
	/* mmc block device test */
	struct blk_dev_test_type bdt;
	/* Current BKOPs test stage */
	enum bkops_test_stages bkops_stage;
	/* A wait queue for BKOPs tests */
	wait_queue_head_t bkops_wait_q;
};
219
/* Module-wide test data; presumably allocated at module init — allocation
 * site is not in this chunk, confirm before relying on it being non-NULL. */
static struct mmc_block_test_data *mbtd;
221
/*
 * A callback assigned to the packed_test_fn field.
 * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
 * Here we alter the packed header or CMD23 in order to send an invalid
 * packed command to the card.
 * Which field gets corrupted is selected by mbtd->test_info.testcase.
 */
static void test_invalid_packed_cmd(struct request_queue *q,
				struct mmc_queue_req *mqrq)
{
	struct mmc_queue *mq = q->queuedata;
	u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
	struct request *req = mqrq->req;
	struct request *second_rq;
	struct test_request *test_rq;
	struct mmc_blk_request *brq = &mqrq->brq;
	int num_requests;
	int max_packed_reqs;

	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return;
	}

	test_rq = (struct test_request *)req->elv.priv[0];
	if (!test_rq) {
		test_pr_err("%s: NULL test_rq", __func__);
		return;
	}
	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	switch (mbtd->test_info.testcase) {
	case TEST_HDR_INVALID_VERSION:
		test_pr_info("%s: set invalid header version", __func__);
		/* Put 0 in header version field (1 byte, offset 0 in header) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
		break;
	case TEST_HDR_WRONG_WRITE_CODE:
		test_pr_info("%s: wrong write code", __func__);
		/* Set R/W field with R value (1 byte, offset 1 in header) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
		break;
	case TEST_HDR_INVALID_RW_CODE:
		test_pr_info("%s: invalid r/w code", __func__);
		/* Set R/W field with invalid value */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
		break;
	case TEST_HDR_DIFFERENT_ADDRESSES:
		test_pr_info("%s: different addresses", __func__);
		second_rq = list_entry(req->queuelist.next, struct request,
				queuelist);
		test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
			      __func__, (long)req->__sector,
			     (long)second_rq->__sector);
		/*
		 * Put start sector of second write request in the first write
		 * request's cmd25 argument in the packed header
		 */
		packed_cmd_hdr[3] = second_rq->__sector;
		break;
	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
		test_pr_info("%s: request num smaller than actual" , __func__);
		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
									>> 16;
		/* num of entries is decremented by 1 */
		num_requests = (num_requests - 1) << 16;
		/*
		 * Set number of requests field in packed write header to be
		 * smaller than the actual number (1 byte, offset 2 in header)
		 */
		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
		break;
	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
		test_pr_info("%s: request num larger than actual" , __func__);
		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
									>> 16;
		/* num of entries is incremented by 1 */
		num_requests = (num_requests + 1) << 16;
		/*
		 * Set number of requests field in packed write header to be
		 * larger than the actual number (1 byte, offset 2 in header).
		 */
		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
		break;
	case TEST_HDR_CMD23_PACKED_BIT_SET:
		test_pr_info("%s: header CMD23 packed bit set" , __func__);
		/*
		 * Set packed bit (bit 30) in cmd23 argument of first and second
		 * write requests in packed write header.
		 * These are located at bytes 2 and 4 in packed write header
		 */
		packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
		packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
		break;
	case TEST_CMD23_MAX_PACKED_WRITES:
		test_pr_info("%s: CMD23 request num > max_packed_reqs",
			      __func__);
		/*
		 * Set the individual packed cmd23 request num to
		 * max_packed_reqs + 1
		 */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
		break;
	case TEST_CMD23_ZERO_PACKED_WRITES:
		test_pr_info("%s: CMD23 request num = 0", __func__);
		/* Set the individual packed cmd23 request num to zero */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED;
		break;
	case TEST_CMD23_PACKED_BIT_UNSET:
		test_pr_info("%s: CMD23 packed bit unset", __func__);
		/*
		 * Set the individual packed cmd23 packed bit to 0,
		 *  although there is a packed write request
		 */
		brq->sbc.arg &= ~CMD23_PACKED_BIT;
		break;
	case TEST_CMD23_REL_WR_BIT_SET:
		test_pr_info("%s: CMD23 REL WR bit set", __func__);
		/* Set the individual packed cmd23 reliable write bit */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
		break;
	case TEST_CMD23_BITS_16TO29_SET:
		test_pr_info("%s: CMD23 bits [16-29] set", __func__);
		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
			PACKED_HDR_BITS_16_TO_29_SET;
		break;
	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
		test_pr_info("%s: CMD23 hdr not in block count", __func__);
		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
		((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
		break;
	}
}
362
/*
 * A callback assigned to the err_check_fn field of the mmc_request by the
 * MMC/card/block layer.
 * Called upon request completion by the MMC/core layer.
 * Here we emulate an error return value from the card.
 * Returns an MMC_BLK_* status selected by the current testcase, or 0 on
 * internal error (NOTE(review): 0 reads as a success status to the caller —
 * confirm this is the intended fallback).
 */
static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request_queue *req_q = test_iosched_get_req_queue();
	struct mmc_queue *mq;
	int max_packed_reqs;
	int ret = 0;
	struct mmc_blk_request *brq;

	if (req_q)
		mq = req_q->queuedata;
	else {
		test_pr_err("%s: NULL request_queue", __func__);
		return 0;
	}

	if (!mq) {
		test_pr_err("%s: %s: NULL mq", __func__,
			mmc_hostname(card->host));
		return 0;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	if (!mq_rq) {
		test_pr_err("%s: %s: NULL mq_rq", __func__,
			mmc_hostname(card->host));
		return 0;
	}
	brq = &mq_rq->brq;

	switch (mbtd->test_info.testcase) {
	case TEST_RET_ABORT:
		test_pr_info("%s: return abort", __func__);
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
		test_pr_info("%s: return partial followed by success",
			      __func__);
		/*
		 * Since in this testcase num_requests is always >= 2,
		 * we can be sure that packed_fail_idx is always >= 1
		 */
		mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
		test_pr_info("%s: packed_fail_idx = %d"
			, __func__, mq_rq->packed_fail_idx);
		/* Clear the hook so the retried request completes normally */
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
		if (!mbtd->err_check_counter) {
			test_pr_info("%s: return partial followed by abort",
				      __func__);
			mbtd->err_check_counter++;
			/*
			 * Since in this testcase num_requests is always >= 3,
			 * we have that packed_fail_idx is always >= 1
			 */
			mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
			test_pr_info("%s: packed_fail_idx = %d"
				, __func__, mq_rq->packed_fail_idx);
			ret = MMC_BLK_PARTIAL;
			break;
		}
		/* Second invocation: reset state and abort */
		mbtd->err_check_counter = 0;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
		test_pr_info("%s: return partial multiple until success",
			     __func__);
		/* Keep failing at index 1 until num_requests invocations */
		if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
			mq->err_check_fn = NULL;
			mbtd->err_check_counter = 0;
			ret = MMC_BLK_PARTIAL;
			break;
		}
		mq_rq->packed_fail_idx = 1;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
		test_pr_info("%s: return partial max fail_idx", __func__);
		mq_rq->packed_fail_idx = max_packed_reqs - 1;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_RETRY:
		test_pr_info("%s: return retry", __func__);
		ret = MMC_BLK_RETRY;
		break;
	case TEST_RET_CMD_ERR:
		test_pr_info("%s: return cmd err", __func__);
		ret = MMC_BLK_CMD_ERR;
		break;
	case TEST_RET_DATA_ERR:
		test_pr_info("%s: return data err", __func__);
		ret = MMC_BLK_DATA_ERR;
		break;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* Only the first completion simulates the card exception */
		if (mbtd->err_check_counter++ == 0) {
			test_pr_info("%s: simulate an exception from the card",
				     __func__);
			brq->cmd.resp[0] |= R1_EXCEPTION_EVENT;
		}
		mq->err_check_fn = NULL;
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
	}

	return ret;
}
485
486/*
487 * This is a specific implementation for the get_test_case_str_fn function
488 * pointer in the test_info data structure. Given a valid test_data instance,
489 * the function returns a string resembling the test name, based on the testcase
490 */
491static char *get_test_case_str(struct test_data *td)
492{
493 if (!td) {
494 test_pr_err("%s: NULL td", __func__);
495 return NULL;
496 }
497
498 switch (td->test_info.testcase) {
499 case TEST_STOP_DUE_TO_FLUSH:
500 return "Test stop due to flush";
501 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
502 return "Test stop due to flush after max-1 reqs";
503 case TEST_STOP_DUE_TO_READ:
504 return "Test stop due to read";
505 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
506 return "Test stop due to read after max-1 reqs";
507 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
508 return "Test stop due to empty queue";
509 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
510 return "Test stop due to max req num";
511 case TEST_STOP_DUE_TO_THRESHOLD:
512 return "Test stop due to exceeding threshold";
513 case TEST_RET_ABORT:
514 return "Test err_check return abort";
515 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
516 return "Test err_check return partial followed by success";
517 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
518 return "Test err_check return partial followed by abort";
519 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
520 return "Test err_check return partial multiple until success";
521 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
522 return "Test err_check return partial max fail index";
523 case TEST_RET_RETRY:
524 return "Test err_check return retry";
525 case TEST_RET_CMD_ERR:
526 return "Test err_check return cmd error";
527 case TEST_RET_DATA_ERR:
528 return "Test err_check return data error";
529 case TEST_HDR_INVALID_VERSION:
530 return "Test invalid - wrong header version";
531 case TEST_HDR_WRONG_WRITE_CODE:
532 return "Test invalid - wrong write code";
533 case TEST_HDR_INVALID_RW_CODE:
534 return "Test invalid - wrong R/W code";
535 case TEST_HDR_DIFFERENT_ADDRESSES:
536 return "Test invalid - header different addresses";
537 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
538 return "Test invalid - header req num smaller than actual";
539 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
540 return "Test invalid - header req num larger than actual";
541 case TEST_HDR_CMD23_PACKED_BIT_SET:
542 return "Test invalid - header cmd23 packed bit set";
543 case TEST_CMD23_MAX_PACKED_WRITES:
544 return "Test invalid - cmd23 max packed writes";
545 case TEST_CMD23_ZERO_PACKED_WRITES:
546 return "Test invalid - cmd23 zero packed writes";
547 case TEST_CMD23_PACKED_BIT_UNSET:
548 return "Test invalid - cmd23 packed bit unset";
549 case TEST_CMD23_REL_WR_BIT_SET:
550 return "Test invalid - cmd23 rel wr bit set";
551 case TEST_CMD23_BITS_16TO29_SET:
552 return "Test invalid - cmd23 bits [16-29] set";
553 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
554 return "Test invalid - cmd23 header block not in count";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200555 case TEST_PACKING_EXP_N_OVER_TRIGGER:
556 return "\nTest packing control - pack n";
557 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
558 return "\nTest packing control - pack n followed by read";
559 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
560 return "\nTest packing control - pack n followed by flush";
561 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
562 return "\nTest packing control - pack one followed by read";
563 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
564 return "\nTest packing control - pack threshold";
565 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
566 return "\nTest packing control - no packing";
567 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
568 return "\nTest packing control - no packing, trigger requests";
569 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
570 return "\nTest packing control - no pack, trigger-read-trigger";
571 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
572 return "\nTest packing control- no pack, trigger-flush-trigger";
573 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
574 return "\nTest packing control - mix: pack -> no pack -> pack";
575 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
576 return "\nTest packing control - mix: no pack->pack->no pack";
Maya Erezddc55732012-10-17 09:51:01 +0200577 case TEST_WRITE_DISCARD_SANITIZE_READ:
578 return "\nTest write, discard, sanitize";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200579 case BKOPS_DELAYED_WORK_LEVEL_1:
580 return "\nTest delayed work BKOPS level 1";
581 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
582 return "\nTest delayed work BKOPS level 1 with HPI";
583 case BKOPS_CANCEL_DELAYED_WORK:
584 return "\nTest cancel delayed BKOPS work";
585 case BKOPS_URGENT_LEVEL_2:
586 return "\nTest urgent BKOPS level 2";
587 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
588 return "\nTest urgent BKOPS level 2, followed by a request";
589 case BKOPS_URGENT_LEVEL_3:
590 return "\nTest urgent BKOPS level 3";
Lee Susmanf18263a2012-10-24 14:14:37 +0200591 case TEST_LONG_SEQUENTIAL_READ:
592 return "Test long sequential read";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200593 default:
594 return "Unknown testcase";
595 }
596
597 return NULL;
598}
599
600/*
601 * Compare individual testcase's statistics to the expected statistics:
602 * Compare stop reason and number of packing events
603 */
604static int check_wr_packing_statistics(struct test_data *td)
605{
606 struct mmc_wr_pack_stats *mmc_packed_stats;
607 struct mmc_queue *mq = td->req_q->queuedata;
608 int max_packed_reqs = mq->card->ext_csd.max_packed_writes;
609 int i;
610 struct mmc_card *card = mq->card;
611 struct mmc_wr_pack_stats expected_stats;
612 int *stop_reason;
613 int ret = 0;
614
615 if (!mq) {
616 test_pr_err("%s: NULL mq", __func__);
617 return -EINVAL;
618 }
619
620 expected_stats = mbtd->exp_packed_stats;
621
622 mmc_packed_stats = mmc_blk_get_packed_statistics(card);
623 if (!mmc_packed_stats) {
624 test_pr_err("%s: NULL mmc_packed_stats", __func__);
625 return -EINVAL;
626 }
627
628 if (!mmc_packed_stats->packing_events) {
629 test_pr_err("%s: NULL packing_events", __func__);
630 return -EINVAL;
631 }
632
633 spin_lock(&mmc_packed_stats->lock);
634
635 if (!mmc_packed_stats->enabled) {
636 test_pr_err("%s write packing statistics are not enabled",
637 __func__);
638 ret = -EINVAL;
639 goto exit_err;
640 }
641
642 stop_reason = mmc_packed_stats->pack_stop_reason;
643
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200644 for (i = 1; i <= max_packed_reqs; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200645 if (mmc_packed_stats->packing_events[i] !=
646 expected_stats.packing_events[i]) {
647 test_pr_err(
648 "%s: Wrong pack stats in index %d, got %d, expected %d",
649 __func__, i, mmc_packed_stats->packing_events[i],
650 expected_stats.packing_events[i]);
651 if (td->fs_wr_reqs_during_test)
652 goto cancel_round;
653 ret = -EINVAL;
654 goto exit_err;
655 }
656 }
657
658 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
659 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
660 test_pr_err(
661 "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
662 __func__, stop_reason[EXCEEDS_SEGMENTS],
663 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
664 if (td->fs_wr_reqs_during_test)
665 goto cancel_round;
666 ret = -EINVAL;
667 goto exit_err;
668 }
669
670 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
671 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
672 test_pr_err(
673 "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
674 __func__, stop_reason[EXCEEDS_SECTORS],
675 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
676 if (td->fs_wr_reqs_during_test)
677 goto cancel_round;
678 ret = -EINVAL;
679 goto exit_err;
680 }
681
682 if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
683 expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
684 test_pr_err(
685 "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
686 __func__, stop_reason[WRONG_DATA_DIR],
687 expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
688 if (td->fs_wr_reqs_during_test)
689 goto cancel_round;
690 ret = -EINVAL;
691 goto exit_err;
692 }
693
694 if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
695 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
696 test_pr_err(
697 "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
698 __func__, stop_reason[FLUSH_OR_DISCARD],
699 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
700 if (td->fs_wr_reqs_during_test)
701 goto cancel_round;
702 ret = -EINVAL;
703 goto exit_err;
704 }
705
706 if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
707 expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
708 test_pr_err(
709 "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
710 __func__, stop_reason[EMPTY_QUEUE],
711 expected_stats.pack_stop_reason[EMPTY_QUEUE]);
712 if (td->fs_wr_reqs_during_test)
713 goto cancel_round;
714 ret = -EINVAL;
715 goto exit_err;
716 }
717
718 if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
719 expected_stats.pack_stop_reason[REL_WRITE]) {
720 test_pr_err(
721 "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
722 __func__, stop_reason[REL_WRITE],
723 expected_stats.pack_stop_reason[REL_WRITE]);
724 if (td->fs_wr_reqs_during_test)
725 goto cancel_round;
726 ret = -EINVAL;
727 goto exit_err;
728 }
729
730exit_err:
731 spin_unlock(&mmc_packed_stats->lock);
732 if (ret && mmc_packed_stats->enabled)
733 print_mmc_packing_stats(card);
734 return ret;
735cancel_round:
736 spin_unlock(&mmc_packed_stats->lock);
737 test_iosched_set_ignore_round(true);
738 return 0;
739}
740
741/*
742 * Pseudo-randomly choose a seed based on the last seed, and update it in
743 * seed_number. then return seed_number (mod max_val), or min_val.
744 */
745static unsigned int pseudo_random_seed(unsigned int *seed_number,
746 unsigned int min_val,
747 unsigned int max_val)
748{
749 int ret = 0;
750
751 if (!seed_number)
752 return 0;
753
754 *seed_number = ((unsigned int)(((unsigned long)*seed_number *
755 (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
756 ret = (unsigned int)((*seed_number) % max_val);
757
758 return (ret > min_val ? ret : min_val);
759}
760
761/*
762 * Given a pseudo-random seed, find a pseudo-random num_of_bios.
763 * Make sure that num_of_bios is not larger than TEST_MAX_SECTOR_RANGE
764 */
765static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
766 unsigned int *num_of_bios)
767{
768 do {
769 *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
770 TEST_MAX_BIOS_PER_REQ);
771 if (!(*num_of_bios))
772 *num_of_bios = 1;
773 } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
774}
775
776/* Add a single read request to the given td's request queue */
777static int prepare_request_add_read(struct test_data *td)
778{
779 int ret;
780 int start_sec;
781
782 if (td)
783 start_sec = td->start_sector;
784 else {
785 test_pr_err("%s: NULL td", __func__);
786 return 0;
787 }
788
789 test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
790 td->wr_rd_next_req_id);
791
792 ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
793 TEST_PATTERN_5A, NULL);
794 if (ret) {
795 test_pr_err("%s: failed to add a read request", __func__);
796 return ret;
797 }
798
799 return 0;
800}
801
802/* Add a single flush request to the given td's request queue */
803static int prepare_request_add_flush(struct test_data *td)
804{
805 int ret;
806
807 if (!td) {
808 test_pr_err("%s: NULL td", __func__);
809 return 0;
810 }
811
812 test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
813 td->unique_next_req_id);
814 ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
815 0, 0, NULL);
816 if (ret) {
817 test_pr_err("%s: failed to add a flush request", __func__);
818 return ret;
819 }
820
821 return ret;
822}
823
824/*
825 * Add num_requets amount of write requests to the given td's request queue.
826 * If random test mode is chosen we pseudo-randomly choose the number of bios
827 * for each write request, otherwise add between 1 to 5 bio per request.
828 */
829static int prepare_request_add_write_reqs(struct test_data *td,
830 int num_requests, int is_err_expected,
831 int is_random)
832{
833 int i;
834 unsigned int start_sec;
835 int num_bios;
836 int ret = 0;
837 unsigned int *bio_seed = &mbtd->random_test_seed;
838
839 if (td)
840 start_sec = td->start_sector;
841 else {
842 test_pr_err("%s: NULL td", __func__);
843 return ret;
844 }
845
846 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
847 num_requests, td->wr_rd_next_req_id);
848
Lee Susmanf18263a2012-10-24 14:14:37 +0200849 for (i = 1 ; i <= num_requests ; i++) {
850 start_sec =
851 td->start_sector + sizeof(int) *
852 BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200853 if (is_random)
854 pseudo_rnd_num_of_bios(bio_seed, &num_bios);
855 else
856 /*
857 * For the non-random case, give num_bios a value
858 * between 1 and 5, to keep a small number of BIOs
859 */
860 num_bios = (i%5)+1;
861
862 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
863 start_sec, num_bios, TEST_PATTERN_5A, NULL);
864
865 if (ret) {
866 test_pr_err("%s: failed to add a write request",
867 __func__);
868 return ret;
869 }
870 }
871 return 0;
872}
873
874/*
875 * Prepare the write, read and flush requests for a generic packed commands
876 * testcase
877 */
878static int prepare_packed_requests(struct test_data *td, int is_err_expected,
879 int num_requests, int is_random)
880{
881 int ret = 0;
882 struct mmc_queue *mq;
883 int max_packed_reqs;
884 struct request_queue *req_q;
885
886 if (!td) {
887 pr_err("%s: NULL td", __func__);
888 return -EINVAL;
889 }
890
891 req_q = td->req_q;
892
893 if (!req_q) {
894 pr_err("%s: NULL request queue", __func__);
895 return -EINVAL;
896 }
897
898 mq = req_q->queuedata;
899 if (!mq) {
900 test_pr_err("%s: NULL mq", __func__);
901 return -EINVAL;
902 }
903
904 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
905
906 if (mbtd->random_test_seed <= 0) {
907 mbtd->random_test_seed =
908 (unsigned int)(get_jiffies_64() & 0xFFFF);
909 test_pr_info("%s: got seed from jiffies %d",
910 __func__, mbtd->random_test_seed);
911 }
912
913 mmc_blk_init_packed_statistics(mq->card);
914
915 ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
916 is_random);
917 if (ret)
918 return ret;
919
920 /* Avoid memory corruption in upcoming stats set */
921 if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
922 num_requests--;
923
924 memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
925 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
926 memset(mbtd->exp_packed_stats.packing_events, 0,
927 (max_packed_reqs + 1) * sizeof(u32));
928 if (num_requests <= max_packed_reqs)
929 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
930
931 switch (td->test_info.testcase) {
932 case TEST_STOP_DUE_TO_FLUSH:
933 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
934 ret = prepare_request_add_flush(td);
935 if (ret)
936 return ret;
937
938 mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
939 break;
940 case TEST_STOP_DUE_TO_READ:
941 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
942 ret = prepare_request_add_read(td);
943 if (ret)
944 return ret;
945
946 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
947 break;
948 case TEST_STOP_DUE_TO_THRESHOLD:
949 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
950 mbtd->exp_packed_stats.packing_events[1] = 1;
951 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
952 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
953 break;
954 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
955 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
956 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
957 break;
958 default:
959 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
960 }
961 mbtd->num_requests = num_requests;
962
963 return 0;
964}
965
966/*
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200967 * Prepare the write, read and flush requests for the packing control
968 * testcases
969 */
970static int prepare_packed_control_tests_requests(struct test_data *td,
971 int is_err_expected, int num_requests, int is_random)
972{
973 int ret = 0;
974 struct mmc_queue *mq;
975 int max_packed_reqs;
976 int temp_num_req = num_requests;
977 struct request_queue *req_q;
978 int test_packed_trigger;
979 int num_packed_reqs;
980
981 if (!td) {
982 test_pr_err("%s: NULL td\n", __func__);
983 return -EINVAL;
984 }
985
986 req_q = td->req_q;
987
988 if (!req_q) {
989 test_pr_err("%s: NULL request queue\n", __func__);
990 return -EINVAL;
991 }
992
993 mq = req_q->queuedata;
994 if (!mq) {
995 test_pr_err("%s: NULL mq", __func__);
996 return -EINVAL;
997 }
998
999 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1000 test_packed_trigger = mq->num_wr_reqs_to_start_packing;
1001 num_packed_reqs = num_requests - test_packed_trigger;
1002
1003 if (mbtd->random_test_seed == 0) {
1004 mbtd->random_test_seed =
1005 (unsigned int)(get_jiffies_64() & 0xFFFF);
1006 test_pr_info("%s: got seed from jiffies %d",
1007 __func__, mbtd->random_test_seed);
1008 }
1009
1010 mmc_blk_init_packed_statistics(mq->card);
1011
1012 if (td->test_info.testcase ==
1013 TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
1014 temp_num_req = num_requests;
1015 num_requests = test_packed_trigger - 1;
1016 }
1017
1018 /* Verify that the packing is disabled before starting the test */
1019 mq->wr_packing_enabled = false;
1020 mq->num_of_potential_packed_wr_reqs = 0;
1021
1022 if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
1023 mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
1024 mq->wr_packing_enabled = true;
1025 num_requests = test_packed_trigger + 2;
1026 }
1027
1028 ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
1029 is_random);
1030 if (ret)
1031 goto exit;
1032
1033 if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
1034 num_requests = temp_num_req;
1035
1036 memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
1037 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1038 memset(mbtd->exp_packed_stats.packing_events, 0,
1039 (max_packed_reqs + 1) * sizeof(u32));
1040
1041 switch (td->test_info.testcase) {
1042 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
1043 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
1044 ret = prepare_request_add_read(td);
1045 if (ret)
1046 goto exit;
1047
1048 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1049 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1050 break;
1051 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
1052 ret = prepare_request_add_flush(td);
1053 if (ret)
1054 goto exit;
1055
1056 ret = prepare_request_add_write_reqs(td, num_packed_reqs,
1057 is_err_expected, is_random);
1058 if (ret)
1059 goto exit;
1060
1061 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1062 mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
1063 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
1064 break;
1065 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
1066 ret = prepare_request_add_read(td);
1067 if (ret)
1068 goto exit;
1069
1070 ret = prepare_request_add_write_reqs(td, test_packed_trigger,
1071 is_err_expected, is_random);
1072 if (ret)
1073 goto exit;
1074
1075 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1076 break;
1077 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
1078 ret = prepare_request_add_flush(td);
1079 if (ret)
1080 goto exit;
1081
1082 ret = prepare_request_add_write_reqs(td, test_packed_trigger,
1083 is_err_expected, is_random);
1084 if (ret)
1085 goto exit;
1086
1087 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1088 break;
1089 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
1090 ret = prepare_request_add_read(td);
1091 if (ret)
1092 goto exit;
1093
1094 ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
1095 is_err_expected, is_random);
1096 if (ret)
1097 goto exit;
1098
1099 ret = prepare_request_add_write_reqs(td, num_requests,
1100 is_err_expected, is_random);
1101 if (ret)
1102 goto exit;
1103
1104 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1105 mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
1106 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1107 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1108 break;
1109 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
1110 ret = prepare_request_add_read(td);
1111 if (ret)
1112 goto exit;
1113
1114 ret = prepare_request_add_write_reqs(td, num_requests,
1115 is_err_expected, is_random);
1116 if (ret)
1117 goto exit;
1118
1119 ret = prepare_request_add_read(td);
1120 if (ret)
1121 goto exit;
1122
1123 ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
1124 is_err_expected, is_random);
1125 if (ret)
1126 goto exit;
1127
1128 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1129 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1130 break;
1131 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
1132 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
1133 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1134 break;
1135 default:
1136 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1137 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1138 }
1139 mbtd->num_requests = num_requests;
1140
1141exit:
1142 return ret;
1143}
1144
1145/*
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001146 * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
1147 * In this testcase we have mixed error expectations from different
1148 * write requests, hence the special prepare function.
1149 */
1150static int prepare_partial_followed_by_abort(struct test_data *td,
1151 int num_requests)
1152{
1153 int i, start_address;
1154 int is_err_expected = 0;
1155 int ret = 0;
1156 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1157 int max_packed_reqs;
1158
1159 if (!mq) {
1160 test_pr_err("%s: NULL mq", __func__);
1161 return -EINVAL;
1162 }
1163
1164 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1165
1166 mmc_blk_init_packed_statistics(mq->card);
1167
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001168 for (i = 1; i <= num_requests; i++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001169 if (i > (num_requests / 2))
1170 is_err_expected = 1;
1171
Lee Susmanf18263a2012-10-24 14:14:37 +02001172 start_address = td->start_sector +
1173 sizeof(int) * BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001174 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001175 start_address, (i % 5) + 1, TEST_PATTERN_5A,
1176 NULL);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001177 if (ret) {
1178 test_pr_err("%s: failed to add a write request",
1179 __func__);
1180 return ret;
1181 }
1182 }
1183
1184 memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
1185 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1186 memset(mbtd->exp_packed_stats.packing_events, 0,
1187 (max_packed_reqs + 1) * sizeof(u32));
1188 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1189 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1190
1191 mbtd->num_requests = num_requests;
1192
1193 return ret;
1194}
1195
1196/*
1197 * Get number of write requests for current testcase. If random test mode was
1198 * chosen, pseudo-randomly choose the number of requests, otherwise set to
1199 * two less than the packing threshold.
1200 */
1201static int get_num_requests(struct test_data *td)
1202{
1203 int *seed = &mbtd->random_test_seed;
1204 struct request_queue *req_q;
1205 struct mmc_queue *mq;
1206 int max_num_requests;
1207 int num_requests;
1208 int min_num_requests = 2;
1209 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001210 int max_for_double;
1211 int test_packed_trigger;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001212
1213 req_q = test_iosched_get_req_queue();
1214 if (req_q)
1215 mq = req_q->queuedata;
1216 else {
1217 test_pr_err("%s: NULL request queue", __func__);
1218 return 0;
1219 }
1220
1221 if (!mq) {
1222 test_pr_err("%s: NULL mq", __func__);
1223 return -EINVAL;
1224 }
1225
1226 max_num_requests = mq->card->ext_csd.max_packed_writes;
1227 num_requests = max_num_requests - 2;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001228 test_packed_trigger = mq->num_wr_reqs_to_start_packing;
1229
1230 /*
1231 * Here max_for_double is intended for packed control testcases
1232 * in which we issue many write requests. It's purpose is to prevent
1233 * exceeding max number of req_queue requests.
1234 */
1235 max_for_double = max_num_requests - 10;
1236
1237 if (td->test_info.testcase ==
1238 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1239 /* Don't expect packing, so issue up to trigger-1 reqs */
1240 num_requests = test_packed_trigger - 1;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001241
1242 if (is_random) {
1243 if (td->test_info.testcase ==
1244 TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001245 /*
1246 * Here we don't want num_requests to be less than 1
1247 * as a consequence of division by 2.
1248 */
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001249 min_num_requests = 3;
1250
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001251 if (td->test_info.testcase ==
1252 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1253 /* Don't expect packing, so issue up to trigger reqs */
1254 max_num_requests = test_packed_trigger;
1255
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001256 num_requests = pseudo_random_seed(seed, min_num_requests,
1257 max_num_requests - 1);
1258 }
1259
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001260 if (td->test_info.testcase ==
1261 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1262 num_requests -= test_packed_trigger;
1263
1264 if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
1265 num_requests =
1266 num_requests > max_for_double ? max_for_double : num_requests;
1267
1268 if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
1269 num_requests += test_packed_trigger;
1270
1271 if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
1272 num_requests = test_packed_trigger;
1273
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001274 return num_requests;
1275}
1276
Lee Susmanf18263a2012-10-24 14:14:37 +02001277static int prepare_long_test_requests(struct test_data *td)
1278{
1279
1280 int ret;
1281 int start_sec;
1282 int j;
1283 int test_direction;
1284
1285 if (td)
1286 start_sec = td->start_sector;
1287 else {
1288 test_pr_err("%s: NULL td\n", __func__);
1289 return -EINVAL;
1290 }
1291
1292 test_direction = READ;
1293
1294 test_pr_info("%s: Adding %d read requests, first req_id=%d", __func__,
1295 LONG_TEST_ACTUAL_NUM_REQS, td->wr_rd_next_req_id);
1296
1297 for (j = 0; j < LONG_TEST_ACTUAL_NUM_REQS; j++) {
1298
1299 ret = test_iosched_add_wr_rd_test_req(0, test_direction,
1300 start_sec,
1301 TEST_MAX_BIOS_PER_REQ,
1302 TEST_NO_PATTERN, NULL);
1303 if (ret) {
1304 test_pr_err("%s: failed to add a bio request",
1305 __func__);
1306 return ret;
1307 }
1308
1309 start_sec +=
1310 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE);
1311 }
1312
1313 return 0;
1314}
1315
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001316/*
1317 * An implementation for the prepare_test_fn pointer in the test_info
1318 * data structure. According to the testcase we add the right number of requests
1319 * and decide if an error is expected or not.
1320 */
1321static int prepare_test(struct test_data *td)
1322{
1323 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1324 int max_num_requests;
1325 int num_requests = 0;
1326 int ret = 0;
1327 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001328 int test_packed_trigger = mq->num_wr_reqs_to_start_packing;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001329
1330 if (!mq) {
1331 test_pr_err("%s: NULL mq", __func__);
1332 return -EINVAL;
1333 }
1334
1335 max_num_requests = mq->card->ext_csd.max_packed_writes;
1336
1337 if (is_random && mbtd->random_test_seed == 0) {
1338 mbtd->random_test_seed =
1339 (unsigned int)(get_jiffies_64() & 0xFFFF);
1340 test_pr_info("%s: got seed from jiffies %d",
1341 __func__, mbtd->random_test_seed);
1342 }
1343
1344 num_requests = get_num_requests(td);
1345
1346 if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
1347 mq->packed_test_fn =
1348 test_invalid_packed_cmd;
1349
1350 if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
1351 mq->err_check_fn = test_err_check;
1352
1353 switch (td->test_info.testcase) {
1354 case TEST_STOP_DUE_TO_FLUSH:
1355 case TEST_STOP_DUE_TO_READ:
1356 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
1357 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
1358 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
1359 case TEST_CMD23_PACKED_BIT_UNSET:
1360 ret = prepare_packed_requests(td, 0, num_requests, is_random);
1361 break;
1362 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1363 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1364 ret = prepare_packed_requests(td, 0, max_num_requests - 1,
1365 is_random);
1366 break;
1367 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
1368 ret = prepare_partial_followed_by_abort(td, num_requests);
1369 break;
1370 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1371 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1372 ret = prepare_packed_requests(td, 0, max_num_requests,
1373 is_random);
1374 break;
1375 case TEST_STOP_DUE_TO_THRESHOLD:
1376 ret = prepare_packed_requests(td, 0, max_num_requests + 1,
1377 is_random);
1378 break;
1379 case TEST_RET_ABORT:
1380 case TEST_RET_RETRY:
1381 case TEST_RET_CMD_ERR:
1382 case TEST_RET_DATA_ERR:
1383 case TEST_HDR_INVALID_VERSION:
1384 case TEST_HDR_WRONG_WRITE_CODE:
1385 case TEST_HDR_INVALID_RW_CODE:
1386 case TEST_HDR_DIFFERENT_ADDRESSES:
1387 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
1388 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
1389 case TEST_CMD23_MAX_PACKED_WRITES:
1390 case TEST_CMD23_ZERO_PACKED_WRITES:
1391 case TEST_CMD23_REL_WR_BIT_SET:
1392 case TEST_CMD23_BITS_16TO29_SET:
1393 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
1394 case TEST_HDR_CMD23_PACKED_BIT_SET:
1395 ret = prepare_packed_requests(td, 1, num_requests, is_random);
1396 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001397 case TEST_PACKING_EXP_N_OVER_TRIGGER:
1398 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
1399 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
1400 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
1401 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
1402 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
1403 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1404 is_random);
1405 break;
1406 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
1407 ret = prepare_packed_control_tests_requests(td, 0,
1408 max_num_requests, is_random);
1409 break;
1410 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
1411 ret = prepare_packed_control_tests_requests(td, 0,
1412 test_packed_trigger + 1,
1413 is_random);
1414 break;
1415 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
1416 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1417 is_random);
1418 break;
1419 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
1420 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
1421 ret = prepare_packed_control_tests_requests(td, 0,
1422 test_packed_trigger, is_random);
1423 break;
Lee Susmanf18263a2012-10-24 14:14:37 +02001424 case TEST_LONG_SEQUENTIAL_READ:
1425 ret = prepare_long_test_requests(td);
1426 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001427 default:
1428 test_pr_info("%s: Invalid test case...", __func__);
Lee Susmanf18263a2012-10-24 14:14:37 +02001429 ret = -EINVAL;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001430 }
1431
1432 return ret;
1433}
1434
1435/*
1436 * An implementation for the post_test_fn in the test_info data structure.
1437 * In our case we just reset the function pointers in the mmc_queue in order for
1438 * the FS to be able to dispatch it's requests correctly after the test is
1439 * finished.
1440 */
1441static int post_test(struct test_data *td)
1442{
1443 struct mmc_queue *mq;
1444
1445 if (!td)
1446 return -EINVAL;
1447
1448 mq = td->req_q->queuedata;
1449
1450 if (!mq) {
1451 test_pr_err("%s: NULL mq", __func__);
1452 return -EINVAL;
1453 }
1454
1455 mq->packed_test_fn = NULL;
1456 mq->err_check_fn = NULL;
1457
1458 return 0;
1459}
1460
1461/*
1462 * This function checks, based on the current test's test_group, that the
1463 * packed commands capability and control are set right. In addition, we check
1464 * if the card supports the packed command feature.
1465 */
1466static int validate_packed_commands_settings(void)
1467{
1468 struct request_queue *req_q;
1469 struct mmc_queue *mq;
1470 int max_num_requests;
1471 struct mmc_host *host;
1472
1473 req_q = test_iosched_get_req_queue();
1474 if (!req_q) {
1475 test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
1476 test_iosched_set_test_result(TEST_FAILED);
1477 return -EINVAL;
1478 }
1479
1480 mq = req_q->queuedata;
1481 if (!mq) {
1482 test_pr_err("%s: NULL mq", __func__);
1483 return -EINVAL;
1484 }
1485
1486 max_num_requests = mq->card->ext_csd.max_packed_writes;
1487 host = mq->card->host;
1488
1489 if (!(host->caps2 && MMC_CAP2_PACKED_WR)) {
1490 test_pr_err("%s: Packed Write capability disabled, exit test",
1491 __func__);
1492 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1493 return -EINVAL;
1494 }
1495
1496 if (max_num_requests == 0) {
1497 test_pr_err(
1498 "%s: no write packing support, ext_csd.max_packed_writes=%d",
1499 __func__, mq->card->ext_csd.max_packed_writes);
1500 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1501 return -EINVAL;
1502 }
1503
1504 test_pr_info("%s: max number of packed requests supported is %d ",
1505 __func__, max_num_requests);
1506
1507 switch (mbtd->test_group) {
1508 case TEST_SEND_WRITE_PACKING_GROUP:
1509 case TEST_ERR_CHECK_GROUP:
1510 case TEST_SEND_INVALID_GROUP:
1511 /* disable the packing control */
1512 host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
1513 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001514 case TEST_PACKING_CONTROL_GROUP:
1515 host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
1516 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001517 default:
1518 break;
1519 }
1520
1521 return 0;
1522}
1523
Maya Erezddc55732012-10-17 09:51:01 +02001524static void pseudo_rnd_sector_and_size(unsigned int *seed,
1525 unsigned int min_start_sector,
1526 unsigned int *start_sector,
1527 unsigned int *num_of_bios)
1528{
1529 unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
1530 do {
1531 *start_sector = pseudo_random_seed(seed,
1532 1, max_sec);
1533 *num_of_bios = pseudo_random_seed(seed,
1534 1, TEST_MAX_BIOS_PER_REQ);
1535 if (!(*num_of_bios))
1536 *num_of_bios = 1;
1537 } while ((*start_sector < min_start_sector) ||
1538 (*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
1539}
1540
1541/* sanitize test functions */
1542static int prepare_write_discard_sanitize_read(struct test_data *td)
1543{
1544 unsigned int start_sector;
1545 unsigned int num_of_bios = 0;
1546 static unsigned int total_bios;
1547 unsigned int *num_bios_seed;
1548 int i = 0;
1549
1550 if (mbtd->random_test_seed == 0) {
1551 mbtd->random_test_seed =
1552 (unsigned int)(get_jiffies_64() & 0xFFFF);
1553 test_pr_info("%s: got seed from jiffies %d",
1554 __func__, mbtd->random_test_seed);
1555 }
1556 num_bios_seed = &mbtd->random_test_seed;
1557
1558 do {
1559 pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
1560 &start_sector, &num_of_bios);
1561
1562 /* DISCARD */
1563 total_bios += num_of_bios;
1564 test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
1565 __func__, td->unique_next_req_id, start_sector,
1566 num_of_bios);
1567 test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
1568 start_sector, BIO_TO_SECTOR(num_of_bios),
1569 NULL);
1570
1571 } while (++i < (BLKDEV_MAX_RQ-10));
1572
1573 test_pr_info("%s: total discard bios = %d", __func__, total_bios);
1574
1575 test_pr_info("%s: add sanitize req", __func__);
1576 test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);
1577
1578 return 0;
1579}
1580
Yaniv Gardie9214c82012-10-18 13:58:18 +02001581/*
1582 * Post test operations for BKOPs test
1583 * Disable the BKOPs statistics and clear the feature flags
1584 */
1585static int bkops_post_test(struct test_data *td)
1586{
1587 struct request_queue *q = td->req_q;
1588 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1589 struct mmc_card *card = mq->card;
1590
1591 mmc_card_clr_doing_bkops(mq->card);
1592 card->ext_csd.raw_bkops_status = 0;
1593
1594 spin_lock(&card->bkops_info.bkops_stats.lock);
1595 card->bkops_info.bkops_stats.enabled = false;
1596 spin_unlock(&card->bkops_info.bkops_stats.lock);
1597
1598 return 0;
1599}
1600
1601/*
1602 * Verify the BKOPs statsistics
1603 */
1604static int check_bkops_result(struct test_data *td)
1605{
1606 struct request_queue *q = td->req_q;
1607 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1608 struct mmc_card *card = mq->card;
1609 struct mmc_bkops_stats *bkops_stat;
1610
1611 if (!card)
1612 goto fail;
1613
1614 bkops_stat = &card->bkops_info.bkops_stats;
1615
1616 test_pr_info("%s: Test results: bkops:(%d,%d,%d) hpi:%d, suspend:%d",
1617 __func__,
1618 bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX],
1619 bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX],
1620 bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX],
1621 bkops_stat->hpi,
1622 bkops_stat->suspend);
1623
1624 switch (mbtd->test_info.testcase) {
1625 case BKOPS_DELAYED_WORK_LEVEL_1:
1626 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
1627 (bkops_stat->suspend == 1) &&
1628 (bkops_stat->hpi == 0))
1629 goto exit;
1630 else
1631 goto fail;
1632 break;
1633 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
1634 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
1635 (bkops_stat->suspend == 0) &&
1636 (bkops_stat->hpi == 1))
1637 goto exit;
1638 else
1639 goto fail;
1640 break;
1641 case BKOPS_CANCEL_DELAYED_WORK:
1642 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
1643 (bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 0) &&
1644 (bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 0) &&
1645 (bkops_stat->suspend == 0) &&
1646 (bkops_stat->hpi == 0))
1647 goto exit;
1648 else
1649 goto fail;
1650 case BKOPS_URGENT_LEVEL_2:
1651 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
1652 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 1) &&
1653 (bkops_stat->suspend == 0) &&
1654 (bkops_stat->hpi == 0))
1655 goto exit;
1656 else
1657 goto fail;
1658 case BKOPS_URGENT_LEVEL_3:
1659 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 1) &&
1660 (bkops_stat->suspend == 0) &&
1661 (bkops_stat->hpi == 0))
1662 goto exit;
1663 else
1664 goto fail;
1665 default:
1666 return -EINVAL;
1667 }
1668
1669exit:
1670 return 0;
1671fail:
1672 if (td->fs_wr_reqs_during_test) {
1673 test_pr_info("%s: wr reqs during test, cancel the round",
1674 __func__);
1675 test_iosched_set_ignore_round(true);
1676 return 0;
1677 }
1678
1679 test_pr_info("%s: BKOPs statistics are not as expected, test failed",
1680 __func__);
1681 return -EINVAL;
1682}
1683
1684static void bkops_end_io_final_fn(struct request *rq, int err)
1685{
1686 struct test_request *test_rq =
1687 (struct test_request *)rq->elv.priv[0];
1688 BUG_ON(!test_rq);
1689
1690 test_rq->req_completed = 1;
1691 test_rq->req_result = err;
1692
1693 test_pr_info("%s: request %d completed, err=%d",
1694 __func__, test_rq->req_id, err);
1695
1696 mbtd->bkops_stage = BKOPS_STAGE_4;
1697 wake_up(&mbtd->bkops_wait_q);
1698}
1699
1700static void bkops_end_io_fn(struct request *rq, int err)
1701{
1702 struct test_request *test_rq =
1703 (struct test_request *)rq->elv.priv[0];
1704 BUG_ON(!test_rq);
1705
1706 test_rq->req_completed = 1;
1707 test_rq->req_result = err;
1708
1709 test_pr_info("%s: request %d completed, err=%d",
1710 __func__, test_rq->req_id, err);
1711 mbtd->bkops_stage = BKOPS_STAGE_2;
1712 wake_up(&mbtd->bkops_wait_q);
1713
1714}
1715
1716static int prepare_bkops(struct test_data *td)
1717{
1718 int ret = 0;
1719 struct request_queue *q = td->req_q;
1720 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1721 struct mmc_card *card = mq->card;
1722 struct mmc_bkops_stats *bkops_stat;
1723
1724 if (!card)
1725 return -EINVAL;
1726
1727 bkops_stat = &card->bkops_info.bkops_stats;
1728
1729 if (!card->ext_csd.bkops_en) {
1730 test_pr_err("%s: BKOPS is not enabled by card or host)",
1731 __func__);
1732 return -ENOTSUPP;
1733 }
1734 if (mmc_card_doing_bkops(card)) {
1735 test_pr_err("%s: BKOPS in progress, try later", __func__);
1736 return -EAGAIN;
1737 }
1738
1739 mmc_blk_init_bkops_statistics(card);
1740
1741 if ((mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2) ||
1742 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2_TWO_REQS) ||
1743 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_3))
1744 mq->err_check_fn = test_err_check;
1745 mbtd->err_check_counter = 0;
1746
1747 return ret;
1748}
1749
/*
 * Run the BKOPs testcase scenario selected in mbtd->test_info.testcase.
 *
 * Each case fakes a card BKOPs status level (ext_csd.raw_bkops_status),
 * drives the request queue, and synchronizes with the end-io callbacks
 * through bkops_wait_q / bkops_stage before marking the test complete.
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int run_bkops(struct test_data *td)
{
	int ret = 0;
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		return -EINVAL;

	bkops_stat = &card->bkops_info.bkops_stats;

	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* fake a level-1 status and enough changed sectors to
		 * queue the delayed BKOPs work */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		/* this long sleep makes sure the host starts bkops and
		   also, gets into suspend */
		msleep(10000);

		bkops_stat->ignore_card_bkops_status = false;
		card->ext_csd.raw_bkops_status = 0;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* as above, but a write issued after the delayed-work
		 * period should interrupt BKOPs with HPI */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		msleep(card->bkops_info.delay_ms);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				      td->start_sector,
				      TEST_REQUEST_NUM_OF_BIOS,
				      TEST_PATTERN_5A,
				      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
				struct test_request, queuelist);
		__blk_run_queue(q);
		/* wait for the end-io callback to reach the final stage */
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_CANCEL_DELAYED_WORK:
		/* a write issued immediately should cancel the queued
		 * delayed BKOPs work before it runs */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
				struct test_request, queuelist);
		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
		/* urgent BKOPs: first request triggers BKOPs handling
		 * (stage 2), second request completes the test (stage 4) */
		bkops_stat->ignore_card_bkops_status = true;
		if (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2)
			card->ext_csd.raw_bkops_status = 2;
		else
			card->ext_csd.raw_bkops_status = 3;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* same as level 2, but two requests are queued before the
		 * queue runs (packing disabled so they stay separate) */
		mq->wr_packing_enabled = false;
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 2;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				NULL);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		/* start dispatching from the FIRST queued request */
		td->next_req = list_entry(td->test_queue.next,
					  struct test_request, queuelist);
		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();

		break;
	default:
		test_pr_err("%s: wrong testcase: %d", __func__,
			    mbtd->test_info.testcase);
		ret = -EINVAL;
	}
	return ret;
}
1963
/*
 * One-shot read flag shared by all test debugfs nodes: set on open,
 * cleared by the first read so the next read returns 0 (EOF) and
 * `cat` on the node terminates.
 */
static bool message_repeat;
1965static int test_open(struct inode *inode, struct file *file)
1966{
1967 file->private_data = inode->i_private;
1968 message_repeat = 1;
1969 return 0;
1970}
1971
1972/* send_packing TEST */
1973static ssize_t send_write_packing_test_write(struct file *file,
1974 const char __user *buf,
1975 size_t count,
1976 loff_t *ppos)
1977{
1978 int ret = 0;
1979 int i = 0;
1980 int number = -1;
1981 int j = 0;
1982
1983 test_pr_info("%s: -- send_write_packing TEST --", __func__);
1984
1985 sscanf(buf, "%d", &number);
1986
1987 if (number <= 0)
1988 number = 1;
1989
1990
1991 mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
1992
1993 if (validate_packed_commands_settings())
1994 return count;
1995
1996 if (mbtd->random_test_seed > 0)
1997 test_pr_info("%s: Test seed: %d", __func__,
1998 mbtd->random_test_seed);
1999
2000 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2001
2002 mbtd->test_info.data = mbtd;
2003 mbtd->test_info.prepare_test_fn = prepare_test;
2004 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2005 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2006 mbtd->test_info.post_test_fn = post_test;
2007
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002008 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002009 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2010 test_pr_info("%s: ====================", __func__);
2011
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002012 for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
2013 j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002014
2015 mbtd->test_info.testcase = j;
2016 mbtd->is_random = RANDOM_TEST;
2017 ret = test_iosched_start_test(&mbtd->test_info);
2018 if (ret)
2019 break;
2020 /* Allow FS requests to be dispatched */
2021 msleep(1000);
2022 mbtd->test_info.testcase = j;
2023 mbtd->is_random = NON_RANDOM_TEST;
2024 ret = test_iosched_start_test(&mbtd->test_info);
2025 if (ret)
2026 break;
2027 /* Allow FS requests to be dispatched */
2028 msleep(1000);
2029 }
2030 }
2031
2032 test_pr_info("%s: Completed all the test cases.", __func__);
2033
2034 return count;
2035}
2036
/*
 * debugfs read handler: prints a one-shot description of the scenarios
 * covered by the send_write_packing test.
 *
 * NOTE(review): @buffer is a __user pointer but is written directly with
 * memset()/snprintf() instead of copy_to_user()/simple_read_from_buffer()
 * — confirm this is safe on the targeted kernel, or convert. *offset is
 * ignored; the file-scope message_repeat flag terminates repeated reads
 * (first read returns the text, subsequent reads return 0 until re-open).
 */
static ssize_t send_write_packing_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nsend_write_packing_test\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Pack due to FLUSH message\n"
		 "- Pack due to FLUSH after threshold writes\n"
		 "- Pack due to READ message\n"
		 "- Pack due to READ after threshold writes\n"
		 "- Pack due to empty queue\n"
		 "- Pack due to threshold writes\n"
		 "- Pack due to one over threshold writes\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2064
/* debugfs file_operations for the send_write_packing_test node */
const struct file_operations send_write_packing_test_ops = {
	.open = test_open,
	.write = send_write_packing_test_write,
	.read = send_write_packing_test_read,
};
2070
2071/* err_check TEST */
2072static ssize_t err_check_test_write(struct file *file,
2073 const char __user *buf,
2074 size_t count,
2075 loff_t *ppos)
2076{
2077 int ret = 0;
2078 int i = 0;
2079 int number = -1;
2080 int j = 0;
2081
2082 test_pr_info("%s: -- err_check TEST --", __func__);
2083
2084 sscanf(buf, "%d", &number);
2085
2086 if (number <= 0)
2087 number = 1;
2088
2089 mbtd->test_group = TEST_ERR_CHECK_GROUP;
2090
2091 if (validate_packed_commands_settings())
2092 return count;
2093
2094 if (mbtd->random_test_seed > 0)
2095 test_pr_info("%s: Test seed: %d", __func__,
2096 mbtd->random_test_seed);
2097
2098 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2099
2100 mbtd->test_info.data = mbtd;
2101 mbtd->test_info.prepare_test_fn = prepare_test;
2102 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2103 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2104 mbtd->test_info.post_test_fn = post_test;
2105
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002106 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002107 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2108 test_pr_info("%s: ====================", __func__);
2109
2110 for (j = ERR_CHECK_MIN_TESTCASE;
2111 j <= ERR_CHECK_MAX_TESTCASE ; j++) {
2112 mbtd->test_info.testcase = j;
2113 mbtd->is_random = RANDOM_TEST;
2114 ret = test_iosched_start_test(&mbtd->test_info);
2115 if (ret)
2116 break;
2117 /* Allow FS requests to be dispatched */
2118 msleep(1000);
2119 mbtd->test_info.testcase = j;
2120 mbtd->is_random = NON_RANDOM_TEST;
2121 ret = test_iosched_start_test(&mbtd->test_info);
2122 if (ret)
2123 break;
2124 /* Allow FS requests to be dispatched */
2125 msleep(1000);
2126 }
2127 }
2128
2129 test_pr_info("%s: Completed all the test cases.", __func__);
2130
2131 return count;
2132}
2133
/*
 * debugfs read handler: prints a one-shot description of the scenarios
 * covered by the err_check test.
 *
 * NOTE(review): @buffer is a __user pointer written directly with
 * memset()/snprintf() rather than copy_to_user() — same pattern as the
 * sibling read handlers; confirm or convert. *offset is ignored; the
 * message_repeat flag terminates repeated reads.
 */
static ssize_t err_check_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nerr_check_TEST\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Return ABORT\n"
		 "- Return PARTIAL followed by success\n"
		 "- Return PARTIAL followed by abort\n"
		 "- Return PARTIAL multiple times until success\n"
		 "- Return PARTIAL with fail index = threshold\n"
		 "- Return RETRY\n"
		 "- Return CMD_ERR\n"
		 "- Return DATA_ERR\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2162
/* debugfs file_operations for the err_check_test node */
const struct file_operations err_check_test_ops = {
	.open = test_open,
	.write = err_check_test_write,
	.read = err_check_test_read,
};
2168
2169/* send_invalid_packed TEST */
2170static ssize_t send_invalid_packed_test_write(struct file *file,
2171 const char __user *buf,
2172 size_t count,
2173 loff_t *ppos)
2174{
2175 int ret = 0;
2176 int i = 0;
2177 int number = -1;
2178 int j = 0;
2179 int num_of_failures = 0;
2180
2181 test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
2182
2183 sscanf(buf, "%d", &number);
2184
2185 if (number <= 0)
2186 number = 1;
2187
2188 mbtd->test_group = TEST_SEND_INVALID_GROUP;
2189
2190 if (validate_packed_commands_settings())
2191 return count;
2192
2193 if (mbtd->random_test_seed > 0)
2194 test_pr_info("%s: Test seed: %d", __func__,
2195 mbtd->random_test_seed);
2196
2197 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2198
2199 mbtd->test_info.data = mbtd;
2200 mbtd->test_info.prepare_test_fn = prepare_test;
2201 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2202 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2203 mbtd->test_info.post_test_fn = post_test;
2204
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002205 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002206 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2207 test_pr_info("%s: ====================", __func__);
2208
2209 for (j = INVALID_CMD_MIN_TESTCASE;
2210 j <= INVALID_CMD_MAX_TESTCASE ; j++) {
2211
2212 mbtd->test_info.testcase = j;
2213 mbtd->is_random = RANDOM_TEST;
2214 ret = test_iosched_start_test(&mbtd->test_info);
2215 if (ret)
2216 num_of_failures++;
2217 /* Allow FS requests to be dispatched */
2218 msleep(1000);
2219
2220 mbtd->test_info.testcase = j;
2221 mbtd->is_random = NON_RANDOM_TEST;
2222 ret = test_iosched_start_test(&mbtd->test_info);
2223 if (ret)
2224 num_of_failures++;
2225 /* Allow FS requests to be dispatched */
2226 msleep(1000);
2227 }
2228 }
2229
2230 test_pr_info("%s: Completed all the test cases.", __func__);
2231
2232 if (num_of_failures > 0) {
2233 test_iosched_set_test_result(TEST_FAILED);
2234 test_pr_err(
2235 "There were %d failures during the test, TEST FAILED",
2236 num_of_failures);
2237 }
2238 return count;
2239}
2240
/*
 * debugfs read handler: prints a one-shot description of the scenarios
 * covered by the send_invalid_packed test.
 *
 * NOTE(review): @buffer is a __user pointer written directly with
 * memset()/snprintf() rather than copy_to_user() — same pattern as the
 * sibling read handlers; confirm or convert. *offset is ignored; the
 * message_repeat flag terminates repeated reads.
 */
static ssize_t send_invalid_packed_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nsend_invalid_packed_TEST\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Send an invalid header version\n"
		 "- Send the wrong write code\n"
		 "- Send an invalid R/W code\n"
		 "- Send wrong start address in header\n"
		 "- Send header with block_count smaller than actual\n"
		 "- Send header with block_count larger than actual\n"
		 "- Send header CMD23 packed bit set\n"
		 "- Send CMD23 with block count over threshold\n"
		 "- Send CMD23 with block_count equals zero\n"
		 "- Send CMD23 packed bit unset\n"
		 "- Send CMD23 reliable write bit set\n"
		 "- Send CMD23 bits [16-29] set\n"
		 "- Send CMD23 header block not in block_count\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2274
/* debugfs file_operations for the send_invalid_packed_test node */
const struct file_operations send_invalid_packed_test_ops = {
	.open = test_open,
	.write = send_invalid_packed_test_write,
	.read = send_invalid_packed_test_read,
};
2280
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002281/* packing_control TEST */
2282static ssize_t write_packing_control_test_write(struct file *file,
2283 const char __user *buf,
2284 size_t count,
2285 loff_t *ppos)
2286{
2287 int ret = 0;
2288 int i = 0;
2289 int number = -1;
2290 int j = 0;
2291 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
2292 int max_num_requests = mq->card->ext_csd.max_packed_writes;
2293 int test_successful = 1;
2294
2295 test_pr_info("%s: -- write_packing_control TEST --", __func__);
2296
2297 sscanf(buf, "%d", &number);
2298
2299 if (number <= 0)
2300 number = 1;
2301
2302 test_pr_info("%s: max_num_requests = %d ", __func__,
2303 max_num_requests);
2304
2305 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2306 mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
2307
2308 if (validate_packed_commands_settings())
2309 return count;
2310
2311 mbtd->test_info.data = mbtd;
2312 mbtd->test_info.prepare_test_fn = prepare_test;
2313 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2314 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2315
2316 for (i = 0; i < number; ++i) {
2317 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2318 test_pr_info("%s: ====================", __func__);
2319
2320 for (j = PACKING_CONTROL_MIN_TESTCASE;
2321 j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
2322
2323 test_successful = 1;
2324 mbtd->test_info.testcase = j;
2325 mbtd->is_random = RANDOM_TEST;
2326 ret = test_iosched_start_test(&mbtd->test_info);
2327 if (ret) {
2328 test_successful = 0;
2329 break;
2330 }
2331 /* Allow FS requests to be dispatched */
2332 msleep(1000);
2333
2334 mbtd->test_info.testcase = j;
2335 mbtd->is_random = NON_RANDOM_TEST;
2336 ret = test_iosched_start_test(&mbtd->test_info);
2337 if (ret) {
2338 test_successful = 0;
2339 break;
2340 }
2341 /* Allow FS requests to be dispatched */
2342 msleep(1000);
2343 }
2344
2345 if (!test_successful)
2346 break;
2347 }
2348
2349 test_pr_info("%s: Completed all the test cases.", __func__);
2350
2351 return count;
2352}
2353
/*
 * debugfs read handler: prints a one-shot description of the scenarios
 * covered by the write_packing_control test.
 *
 * NOTE(review): @buffer is a __user pointer written directly with
 * memset()/snprintf() rather than copy_to_user() — same pattern as the
 * sibling read handlers; confirm or convert. *offset is ignored; the
 * message_repeat flag terminates repeated reads.
 */
static ssize_t write_packing_control_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nwrite_packing_control_test\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Packing expected - one over trigger\n"
		 "- Packing expected - N over trigger\n"
		 "- Packing expected - N over trigger followed by read\n"
		 "- Packing expected - N over trigger followed by flush\n"
		 "- Packing expected - threshold over trigger FB by flush\n"
		 "- Packing not expected - less than trigger\n"
		 "- Packing not expected - trigger requests\n"
		 "- Packing not expected - trigger, read, trigger\n"
		 "- Mixed state - packing -> no packing -> packing\n"
		 "- Mixed state - no packing -> packing -> no packing\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2384
/* debugfs file_operations for the packing_control_test node */
const struct file_operations write_packing_control_test_ops = {
	.open = test_open,
	.write = write_packing_control_test_write,
	.read = write_packing_control_test_read,
};
2390
Maya Erezddc55732012-10-17 09:51:01 +02002391static ssize_t write_discard_sanitize_test_write(struct file *file,
2392 const char __user *buf,
2393 size_t count,
2394 loff_t *ppos)
2395{
2396 int ret = 0;
2397 int i = 0;
2398 int number = -1;
2399
2400 sscanf(buf, "%d", &number);
2401 if (number <= 0)
2402 number = 1;
2403
2404 test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);
2405
2406 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2407
2408 mbtd->test_group = TEST_GENERAL_GROUP;
2409
2410 mbtd->test_info.data = mbtd;
2411 mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
2412 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2413 mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;
2414
2415 for (i = 0 ; i < number ; ++i) {
2416 test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
2417 test_pr_info("%s: ===================", __func__);
2418
2419 mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
2420 ret = test_iosched_start_test(&mbtd->test_info);
2421
2422 if (ret)
2423 break;
2424 }
2425
2426 return count;
2427}
2428
/* debugfs file_operations for the write_discard_sanitize_test node
 * (write-only: this test has no description read handler) */
const struct file_operations write_discard_sanitize_test_ops = {
	.open = test_open,
	.write = write_discard_sanitize_test_write,
};
2433
Yaniv Gardie9214c82012-10-18 13:58:18 +02002434static ssize_t bkops_test_write(struct file *file,
2435 const char __user *buf,
2436 size_t count,
2437 loff_t *ppos)
2438{
2439 int ret = 0;
2440 int i = 0, j;
2441 int number = -1;
2442
2443 test_pr_info("%s: -- bkops_test TEST --", __func__);
2444
2445 sscanf(buf, "%d", &number);
2446
2447 if (number <= 0)
2448 number = 1;
2449
2450 mbtd->test_group = TEST_BKOPS_GROUP;
2451
2452 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2453
2454 mbtd->test_info.data = mbtd;
2455 mbtd->test_info.prepare_test_fn = prepare_bkops;
2456 mbtd->test_info.check_test_result_fn = check_bkops_result;
2457 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2458 mbtd->test_info.run_test_fn = run_bkops;
2459 mbtd->test_info.timeout_msec = BKOPS_TEST_TIMEOUT;
2460 mbtd->test_info.post_test_fn = bkops_post_test;
2461
2462 for (i = 0 ; i < number ; ++i) {
2463 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2464 test_pr_info("%s: ===================", __func__);
2465 for (j = BKOPS_MIN_TESTCASE ;
2466 j <= BKOPS_MAX_TESTCASE ; j++) {
2467 mbtd->test_info.testcase = j;
2468 ret = test_iosched_start_test(&mbtd->test_info);
2469 if (ret)
2470 break;
2471 }
2472 }
2473
2474 test_pr_info("%s: Completed all the test cases.", __func__);
2475
2476 return count;
2477}
2478
2479static ssize_t bkops_test_read(struct file *file,
2480 char __user *buffer,
2481 size_t count,
2482 loff_t *offset)
2483{
2484 memset((void *)buffer, 0, count);
2485
2486 snprintf(buffer, count,
2487 "\nbkops_test\n========================\n"
2488 "Description:\n"
2489 "This test simulates BKOPS status from card\n"
2490 "and verifies that:\n"
2491 " - Starting BKOPS delayed work, level 1\n"
2492 " - Starting BKOPS delayed work, level 1, with HPI\n"
2493 " - Cancel starting BKOPS delayed work, "
2494 " when a request is received\n"
2495 " - Starting BKOPS urgent, level 2,3\n"
2496 " - Starting BKOPS urgent with 2 requests\n");
2497 return strnlen(buffer, count);
2498}
2499
/* debugfs file_operations for the bkops_test node */
const struct file_operations bkops_test_ops = {
	.open = test_open,
	.write = bkops_test_write,
	.read = bkops_test_read,
};
2505
Lee Susmanf18263a2012-10-24 14:14:37 +02002506static ssize_t long_sequential_read_test_write(struct file *file,
2507 const char __user *buf,
2508 size_t count,
2509 loff_t *ppos)
2510{
2511 int ret = 0;
2512 int i = 0;
2513 int number = -1;
2514 unsigned int mtime, integer, fraction;
2515
2516 test_pr_info("%s: -- Long Sequential Read TEST --", __func__);
2517
2518 sscanf(buf, "%d", &number);
2519
2520 if (number <= 0)
2521 number = 1;
2522
2523 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2524 mbtd->test_group = TEST_GENERAL_GROUP;
2525
2526 mbtd->test_info.data = mbtd;
2527 mbtd->test_info.prepare_test_fn = prepare_test;
2528 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2529
2530 for (i = 0 ; i < number ; ++i) {
2531 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2532 test_pr_info("%s: ====================", __func__);
2533
2534 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_READ;
2535 mbtd->is_random = NON_RANDOM_TEST;
2536 ret = test_iosched_start_test(&mbtd->test_info);
2537 if (ret)
2538 break;
2539
2540 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2541
2542 test_pr_info("%s: time is %u msec, size is %u.%u MiB",
2543 __func__, mtime, LONG_TEST_SIZE_INTEGER,
2544 LONG_TEST_SIZE_FRACTION);
2545
2546 /* we first multiply in order not to lose precision */
2547 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2548 /* divide values to get a MiB/sec integer value with one
2549 digit of precision. Multiply by 10 for one digit precision
2550 */
2551 fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
2552 integer /= 10;
2553 /* and calculate the MiB value fraction */
2554 fraction -= integer * 10;
2555
2556 test_pr_info("%s: Throughput: %u.%u MiB/sec\n"
2557 , __func__, integer, fraction);
2558
2559 /* Allow FS requests to be dispatched */
2560 msleep(1000);
2561 }
2562
2563 return count;
2564}
2565
/*
 * debugfs read handler: prints a one-shot description of the long
 * sequential read test.
 *
 * NOTE(review): @buffer is a __user pointer written directly with
 * memset()/snprintf() rather than copy_to_user() — same pattern as the
 * sibling read handlers; confirm or convert. *offset is ignored; the
 * message_repeat flag terminates repeated reads.
 */
static ssize_t long_sequential_read_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nlong_sequential_read_test\n"
		 "=========\n"
		 "Description:\n"
		 "This test runs the following scenarios\n"
		 "- Long Sequential Read Test: this test measures read "
		 "throughput at the driver level by sequentially reading many "
		 "large requests.\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else
		return 0;
}
2588
/* debugfs file_operations for the long_sequential_read_test node */
const struct file_operations long_sequential_read_test_ops = {
	.open = test_open,
	.write = long_sequential_read_test_write,
	.read = long_sequential_read_test_read,
};
2594
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002595static void mmc_block_test_debugfs_cleanup(void)
2596{
2597 debugfs_remove(mbtd->debug.random_test_seed);
2598 debugfs_remove(mbtd->debug.send_write_packing_test);
2599 debugfs_remove(mbtd->debug.err_check_test);
2600 debugfs_remove(mbtd->debug.send_invalid_packed_test);
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002601 debugfs_remove(mbtd->debug.packing_control_test);
Maya Erezddc55732012-10-17 09:51:01 +02002602 debugfs_remove(mbtd->debug.discard_sanitize_test);
Yaniv Gardie9214c82012-10-18 13:58:18 +02002603 debugfs_remove(mbtd->debug.bkops_test);
Lee Susmanf18263a2012-10-24 14:14:37 +02002604 debugfs_remove(mbtd->debug.long_sequential_read_test);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002605}
2606
2607static int mmc_block_test_debugfs_init(void)
2608{
2609 struct dentry *utils_root, *tests_root;
2610
2611 utils_root = test_iosched_get_debugfs_utils_root();
2612 tests_root = test_iosched_get_debugfs_tests_root();
2613
2614 if (!utils_root || !tests_root)
2615 return -EINVAL;
2616
2617 mbtd->debug.random_test_seed = debugfs_create_u32(
2618 "random_test_seed",
2619 S_IRUGO | S_IWUGO,
2620 utils_root,
2621 &mbtd->random_test_seed);
2622
2623 if (!mbtd->debug.random_test_seed)
2624 goto err_nomem;
2625
2626 mbtd->debug.send_write_packing_test =
2627 debugfs_create_file("send_write_packing_test",
2628 S_IRUGO | S_IWUGO,
2629 tests_root,
2630 NULL,
2631 &send_write_packing_test_ops);
2632
2633 if (!mbtd->debug.send_write_packing_test)
2634 goto err_nomem;
2635
2636 mbtd->debug.err_check_test =
2637 debugfs_create_file("err_check_test",
2638 S_IRUGO | S_IWUGO,
2639 tests_root,
2640 NULL,
2641 &err_check_test_ops);
2642
2643 if (!mbtd->debug.err_check_test)
2644 goto err_nomem;
2645
2646 mbtd->debug.send_invalid_packed_test =
2647 debugfs_create_file("send_invalid_packed_test",
2648 S_IRUGO | S_IWUGO,
2649 tests_root,
2650 NULL,
2651 &send_invalid_packed_test_ops);
2652
2653 if (!mbtd->debug.send_invalid_packed_test)
2654 goto err_nomem;
2655
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002656 mbtd->debug.packing_control_test = debugfs_create_file(
2657 "packing_control_test",
2658 S_IRUGO | S_IWUGO,
2659 tests_root,
2660 NULL,
2661 &write_packing_control_test_ops);
2662
2663 if (!mbtd->debug.packing_control_test)
2664 goto err_nomem;
2665
Maya Erezddc55732012-10-17 09:51:01 +02002666 mbtd->debug.discard_sanitize_test =
2667 debugfs_create_file("write_discard_sanitize_test",
2668 S_IRUGO | S_IWUGO,
2669 tests_root,
2670 NULL,
2671 &write_discard_sanitize_test_ops);
2672 if (!mbtd->debug.discard_sanitize_test) {
2673 mmc_block_test_debugfs_cleanup();
2674 return -ENOMEM;
2675 }
2676
Yaniv Gardie9214c82012-10-18 13:58:18 +02002677 mbtd->debug.bkops_test =
2678 debugfs_create_file("bkops_test",
2679 S_IRUGO | S_IWUGO,
2680 tests_root,
2681 NULL,
2682 &bkops_test_ops);
2683
2684 if (!mbtd->debug.bkops_test)
2685 goto err_nomem;
2686
Lee Susmanf18263a2012-10-24 14:14:37 +02002687 mbtd->debug.long_sequential_read_test = debugfs_create_file(
2688 "long_sequential_read_test",
2689 S_IRUGO | S_IWUGO,
2690 tests_root,
2691 NULL,
2692 &long_sequential_read_test_ops);
2693
2694 if (!mbtd->debug.long_sequential_read_test)
2695 goto err_nomem;
2696
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002697 return 0;
2698
2699err_nomem:
2700 mmc_block_test_debugfs_cleanup();
2701 return -ENOMEM;
2702}
2703
2704static void mmc_block_test_probe(void)
2705{
2706 struct request_queue *q = test_iosched_get_req_queue();
2707 struct mmc_queue *mq;
2708 int max_packed_reqs;
2709
2710 if (!q) {
2711 test_pr_err("%s: NULL request queue", __func__);
2712 return;
2713 }
2714
2715 mq = q->queuedata;
2716 if (!mq) {
2717 test_pr_err("%s: NULL mq", __func__);
2718 return;
2719 }
2720
2721 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
2722 mbtd->exp_packed_stats.packing_events =
2723 kzalloc((max_packed_reqs + 1) *
2724 sizeof(*mbtd->exp_packed_stats.packing_events),
2725 GFP_KERNEL);
2726
2727 mmc_block_test_debugfs_init();
2728}
2729
/* test-iosched exit hook: tear down this module's debugfs nodes */
static void mmc_block_test_remove(void)
{
	mmc_block_test_debugfs_cleanup();
}
2734
2735static int __init mmc_block_test_init(void)
2736{
2737 mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
2738 if (!mbtd) {
2739 test_pr_err("%s: failed to allocate mmc_block_test_data",
2740 __func__);
2741 return -ENODEV;
2742 }
2743
Yaniv Gardie9214c82012-10-18 13:58:18 +02002744 init_waitqueue_head(&mbtd->bkops_wait_q);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002745 mbtd->bdt.init_fn = mmc_block_test_probe;
2746 mbtd->bdt.exit_fn = mmc_block_test_remove;
2747 INIT_LIST_HEAD(&mbtd->bdt.list);
2748 test_iosched_register(&mbtd->bdt);
2749
2750 return 0;
2751}
2752
/*
 * Module exit: unregister from test-iosched and free the global context.
 * NOTE(review): exp_packed_stats.packing_events allocated in
 * mmc_block_test_probe() is not freed here — possible leak; verify it is
 * not released elsewhere by the framework.
 */
static void __exit mmc_block_test_exit(void)
{
	test_iosched_unregister(&mbtd->bdt);
	kfree(mbtd);
}
2758
/* Module registration and metadata */
module_init(mmc_block_test_init);
module_exit(mmc_block_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MMC block test");