blob: 7d3ac836a92806ce3cd022cbea1ef0c9f4b24b23 [file] [log] [blame]
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/* MMC block test */
15
16#include <linux/module.h>
17#include <linux/blkdev.h>
18#include <linux/debugfs.h>
19#include <linux/mmc/card.h>
20#include <linux/mmc/host.h>
21#include <linux/delay.h>
22#include <linux/test-iosched.h>
23#include "queue.h"
Yaniv Gardie9214c82012-10-18 13:58:18 +020024#include <linux/mmc/mmc.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020025
26#define MODULE_NAME "mmc_block_test"
27#define TEST_MAX_SECTOR_RANGE (600*1024*1024) /* 600 MB */
28#define TEST_MAX_BIOS_PER_REQ 120
29#define CMD23_PACKED_BIT (1 << 30)
30#define LARGE_PRIME_1 1103515367
31#define LARGE_PRIME_2 35757
32#define PACKED_HDR_VER_MASK 0x000000FF
33#define PACKED_HDR_RW_MASK 0x0000FF00
34#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
35#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
Maya Erezddc55732012-10-17 09:51:01 +020036#define SECTOR_SIZE 512
37#define NUM_OF_SECTORS_PER_BIO ((BIO_U32_SIZE * 4) / SECTOR_SIZE)
38#define BIO_TO_SECTOR(x) (x * NUM_OF_SECTORS_PER_BIO)
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020039
40#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
41#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
42#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
43
Maya Erezddc55732012-10-17 09:51:01 +020044#define SANITIZE_TEST_TIMEOUT 240000
Yaniv Gardie9214c82012-10-18 13:58:18 +020045#define TEST_REQUEST_NUM_OF_BIOS 3
46
47
48#define CHECK_BKOPS_STATS(stats, exp_bkops, exp_hpi, exp_suspend) \
49 ((stats.bkops != exp_bkops) || \
50 (stats.hpi != exp_hpi) || \
51 (stats.suspend != exp_suspend))
52#define BKOPS_TEST_TIMEOUT 60000
Maya Erezddc55732012-10-17 09:51:01 +020053
/*
 * Selects whether a testcase sizes its requests deterministically or
 * pseudo-randomly (see mbtd->random_test_seed and pseudo_rnd_num_of_bios()).
 */
enum is_random {
	NON_RANDOM_TEST,
	RANDOM_TEST,
};
58
/*
 * All supported testcases. Each test group is delimited by a pair of
 * *_MIN_TESTCASE / *_MAX_TESTCASE markers (aliased to the group's first
 * and last member via explicit assignment).
 */
enum mmc_block_test_testcases {
	/* Start of send write packing test group */
	SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_FLUSH,
	TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_EMPTY_QUEUE,
	TEST_STOP_DUE_TO_MAX_REQ_NUM,
	TEST_STOP_DUE_TO_THRESHOLD,
	SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,

	/* Start of err check test group */
	ERR_CHECK_MIN_TESTCASE,
	TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
	TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
	TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
	TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
	TEST_RET_PARTIAL_MAX_FAIL_IDX,
	TEST_RET_RETRY,
	TEST_RET_CMD_ERR,
	TEST_RET_DATA_ERR,
	ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,

	/* Start of send invalid test group */
	INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_WRONG_WRITE_CODE,
	TEST_HDR_INVALID_RW_CODE,
	TEST_HDR_DIFFERENT_ADDRESSES,
	TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
	TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
	TEST_HDR_CMD23_PACKED_BIT_SET,
	TEST_CMD23_MAX_PACKED_WRITES,
	TEST_CMD23_ZERO_PACKED_WRITES,
	TEST_CMD23_PACKED_BIT_UNSET,
	TEST_CMD23_REL_WR_BIT_SET,
	TEST_CMD23_BITS_16TO29_SET,
	TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
	INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,

	/*
	 * Start of packing control test group.
	 * in these next testcases the abbreviation FB = followed by
	 */
	PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
				PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_N_OVER_TRIGGER,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
	TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
	TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
	TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
	TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
	TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
	PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,

	/* Stand-alone testcase, not bracketed by MIN/MAX group markers */
	TEST_WRITE_DISCARD_SANITIZE_READ,

	/* Start of bkops test group */
	BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1 = BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1_HPI,
	BKOPS_CANCEL_DELAYED_WORK,
	BKOPS_URGENT_LEVEL_2,
	BKOPS_URGENT_LEVEL_2_TWO_REQS,
	BKOPS_URGENT_LEVEL_3,
	BKOPS_MAX_TESTCASE = BKOPS_URGENT_LEVEL_3,
};
131
/*
 * Test group identifiers; mbtd->test_group holds one of these while a
 * group of testcases is running.
 */
enum mmc_block_test_group {
	TEST_NO_GROUP,
	TEST_GENERAL_GROUP,
	TEST_SEND_WRITE_PACKING_GROUP,
	TEST_ERR_CHECK_GROUP,
	TEST_SEND_INVALID_GROUP,
	TEST_PACKING_CONTROL_GROUP,
	TEST_BKOPS_GROUP,
};
141
/*
 * Progress markers for the BKOPS testcases; the current stage is kept in
 * mbtd->bkops_stage (tests wait on mbtd->bkops_wait_q for stage changes).
 */
enum bkops_test_stages {
	BKOPS_STAGE_1,
	BKOPS_STAGE_2,
	BKOPS_STAGE_3,
	BKOPS_STAGE_4,
};
148
/* debugfs dentries created by this module, one per test/config node */
struct mmc_block_test_debug {
	struct dentry *send_write_packing_test;
	struct dentry *err_check_test;
	struct dentry *send_invalid_packed_test;
	struct dentry *random_test_seed;
	struct dentry *packing_control_test;
	struct dentry *discard_sanitize_test;
	struct dentry *bkops_test;
};
158
/* Aggregated state shared by all mmc_block_test testcases and callbacks */
struct mmc_block_test_data {
	/* The number of write requests that the test will issue */
	int num_requests;
	/* The expected write packing statistics for the current test */
	struct mmc_wr_pack_stats exp_packed_stats;
	/*
	 * A user-defined seed for random choices of number of bios written in
	 * a request, and of number of requests issued in a test
	 * This field is randomly updated after each use
	 */
	unsigned int random_test_seed;
	/* A retry counter used in err_check tests */
	int err_check_counter;
	/* Can be one of the values of enum test_group */
	enum mmc_block_test_group test_group;
	/*
	 * Indicates if the current testcase is running with random values of
	 * num_requests and num_bios (in each request)
	 */
	int is_random;
	/* Data structure for debugfs dentrys */
	struct mmc_block_test_debug debug;
	/*
	 * Data structure containing individual test information, including
	 * self-defined specific data
	 */
	struct test_info test_info;
	/* mmc block device test */
	struct blk_dev_test_type bdt;
	/* Current BKOPs test stage */
	enum bkops_test_stages bkops_stage;
	/* A wait queue for BKOPs tests */
	wait_queue_head_t bkops_wait_q;
};
193
/* Module-wide test state, shared by all testcases and callbacks */
static struct mmc_block_test_data *mbtd;
195
196/*
197 * A callback assigned to the packed_test_fn field.
198 * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
199 * Here we alter the packed header or CMD23 in order to send an invalid
200 * packed command to the card.
201 */
202static void test_invalid_packed_cmd(struct request_queue *q,
203 struct mmc_queue_req *mqrq)
204{
205 struct mmc_queue *mq = q->queuedata;
206 u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
207 struct request *req = mqrq->req;
208 struct request *second_rq;
209 struct test_request *test_rq;
210 struct mmc_blk_request *brq = &mqrq->brq;
211 int num_requests;
212 int max_packed_reqs;
213
214 if (!mq) {
215 test_pr_err("%s: NULL mq", __func__);
216 return;
217 }
218
219 test_rq = (struct test_request *)req->elv.priv[0];
220 if (!test_rq) {
221 test_pr_err("%s: NULL test_rq", __func__);
222 return;
223 }
224 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
225
226 switch (mbtd->test_info.testcase) {
227 case TEST_HDR_INVALID_VERSION:
228 test_pr_info("%s: set invalid header version", __func__);
229 /* Put 0 in header version field (1 byte, offset 0 in header) */
230 packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
231 break;
232 case TEST_HDR_WRONG_WRITE_CODE:
233 test_pr_info("%s: wrong write code", __func__);
234 /* Set R/W field with R value (1 byte, offset 1 in header) */
235 packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
236 packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
237 break;
238 case TEST_HDR_INVALID_RW_CODE:
239 test_pr_info("%s: invalid r/w code", __func__);
240 /* Set R/W field with invalid value */
241 packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
242 packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
243 break;
244 case TEST_HDR_DIFFERENT_ADDRESSES:
245 test_pr_info("%s: different addresses", __func__);
246 second_rq = list_entry(req->queuelist.next, struct request,
247 queuelist);
248 test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
249 __func__, (long)req->__sector,
250 (long)second_rq->__sector);
251 /*
252 * Put start sector of second write request in the first write
253 * request's cmd25 argument in the packed header
254 */
255 packed_cmd_hdr[3] = second_rq->__sector;
256 break;
257 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
258 test_pr_info("%s: request num smaller than actual" , __func__);
259 num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
260 >> 16;
261 /* num of entries is decremented by 1 */
262 num_requests = (num_requests - 1) << 16;
263 /*
264 * Set number of requests field in packed write header to be
265 * smaller than the actual number (1 byte, offset 2 in header)
266 */
267 packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
268 ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
269 break;
270 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
271 test_pr_info("%s: request num larger than actual" , __func__);
272 num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
273 >> 16;
274 /* num of entries is incremented by 1 */
275 num_requests = (num_requests + 1) << 16;
276 /*
277 * Set number of requests field in packed write header to be
278 * larger than the actual number (1 byte, offset 2 in header).
279 */
280 packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
281 ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
282 break;
283 case TEST_HDR_CMD23_PACKED_BIT_SET:
284 test_pr_info("%s: header CMD23 packed bit set" , __func__);
285 /*
286 * Set packed bit (bit 30) in cmd23 argument of first and second
287 * write requests in packed write header.
288 * These are located at bytes 2 and 4 in packed write header
289 */
290 packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
291 packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
292 break;
293 case TEST_CMD23_MAX_PACKED_WRITES:
294 test_pr_info("%s: CMD23 request num > max_packed_reqs",
295 __func__);
296 /*
297 * Set the individual packed cmd23 request num to
298 * max_packed_reqs + 1
299 */
300 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
301 break;
302 case TEST_CMD23_ZERO_PACKED_WRITES:
303 test_pr_info("%s: CMD23 request num = 0", __func__);
304 /* Set the individual packed cmd23 request num to zero */
305 brq->sbc.arg = MMC_CMD23_ARG_PACKED;
306 break;
307 case TEST_CMD23_PACKED_BIT_UNSET:
308 test_pr_info("%s: CMD23 packed bit unset", __func__);
309 /*
310 * Set the individual packed cmd23 packed bit to 0,
311 * although there is a packed write request
312 */
313 brq->sbc.arg &= ~CMD23_PACKED_BIT;
314 break;
315 case TEST_CMD23_REL_WR_BIT_SET:
316 test_pr_info("%s: CMD23 REL WR bit set", __func__);
317 /* Set the individual packed cmd23 reliable write bit */
318 brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
319 break;
320 case TEST_CMD23_BITS_16TO29_SET:
321 test_pr_info("%s: CMD23 bits [16-29] set", __func__);
322 brq->sbc.arg = MMC_CMD23_ARG_PACKED |
323 PACKED_HDR_BITS_16_TO_29_SET;
324 break;
325 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
326 test_pr_info("%s: CMD23 hdr not in block count", __func__);
327 brq->sbc.arg = MMC_CMD23_ARG_PACKED |
328 ((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
329 break;
330 default:
331 test_pr_err("%s: unexpected testcase %d",
332 __func__, mbtd->test_info.testcase);
333 break;
334 }
335}
336
/*
 * A callback assigned to the err_check_fn field of the mmc_request by the
 * MMC/card/block layer.
 * Called upon request completion by the MMC/core layer.
 * Here we emulate an error return value from the card.
 *
 * Returns 0 (no emulated error) or one of the MMC_BLK_* statuses the
 * block layer acts on. mbtd->err_check_counter persists across
 * invocations, and several cases clear mq->err_check_fn to stop the
 * emulation once the desired sequence has been produced.
 */
static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request_queue *req_q = test_iosched_get_req_queue();
	struct mmc_queue *mq;
	int max_packed_reqs;
	int ret = 0;
	struct mmc_blk_request *brq;

	if (req_q)
		mq = req_q->queuedata;
	else {
		test_pr_err("%s: NULL request_queue", __func__);
		return 0;
	}

	if (!mq) {
		test_pr_err("%s: %s: NULL mq", __func__,
			mmc_hostname(card->host));
		return 0;
	}

	/* Upper bound used by TEST_RET_PARTIAL_MAX_FAIL_IDX below */
	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	if (!mq_rq) {
		test_pr_err("%s: %s: NULL mq_rq", __func__,
			mmc_hostname(card->host));
		return 0;
	}
	brq = &mq_rq->brq;

	/* Emulate the completion status the current testcase expects */
	switch (mbtd->test_info.testcase) {
	case TEST_RET_ABORT:
		test_pr_info("%s: return abort", __func__);
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
		test_pr_info("%s: return partial followed by success",
			      __func__);
		/*
		 * Since in this testcase num_requests is always >= 2,
		 * we can be sure that packed_fail_idx is always >= 1
		 */
		mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
		test_pr_info("%s: packed_fail_idx = %d"
			, __func__, mq_rq->packed_fail_idx);
		/* One-shot: subsequent completions are not intercepted */
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
		/* First invocation returns PARTIAL, second returns ABORT */
		if (!mbtd->err_check_counter) {
			test_pr_info("%s: return partial followed by abort",
				      __func__);
			mbtd->err_check_counter++;
			/*
			 * Since in this testcase num_requests is always >= 3,
			 * we have that packed_fail_idx is always >= 1
			 */
			mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
			test_pr_info("%s: packed_fail_idx = %d"
				, __func__, mq_rq->packed_fail_idx);
			ret = MMC_BLK_PARTIAL;
			break;
		}
		mbtd->err_check_counter = 0;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
		/* Keep failing at index 1 until num_requests completions */
		test_pr_info("%s: return partial multiple until success",
			     __func__);
		if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
			mq->err_check_fn = NULL;
			mbtd->err_check_counter = 0;
			ret = MMC_BLK_PARTIAL;
			break;
		}
		mq_rq->packed_fail_idx = 1;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
		test_pr_info("%s: return partial max fail_idx", __func__);
		/* Fail at the last possible index in the packed command */
		mq_rq->packed_fail_idx = max_packed_reqs - 1;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_RETRY:
		test_pr_info("%s: return retry", __func__);
		ret = MMC_BLK_RETRY;
		break;
	case TEST_RET_CMD_ERR:
		test_pr_info("%s: return cmd err", __func__);
		ret = MMC_BLK_CMD_ERR;
		break;
	case TEST_RET_DATA_ERR:
		test_pr_info("%s: return data err", __func__);
		ret = MMC_BLK_DATA_ERR;
		break;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* Raise the exception bit only on the first completion */
		if (mbtd->err_check_counter++ == 0) {
			test_pr_info("%s: simulate an exception from the card",
				      __func__);
			brq->cmd.resp[0] |= R1_EXCEPTION_EVENT;
		}
		mq->err_check_fn = NULL;
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
	}

	return ret;
}
459
460/*
461 * This is a specific implementation for the get_test_case_str_fn function
462 * pointer in the test_info data structure. Given a valid test_data instance,
463 * the function returns a string resembling the test name, based on the testcase
464 */
465static char *get_test_case_str(struct test_data *td)
466{
467 if (!td) {
468 test_pr_err("%s: NULL td", __func__);
469 return NULL;
470 }
471
472 switch (td->test_info.testcase) {
473 case TEST_STOP_DUE_TO_FLUSH:
474 return "Test stop due to flush";
475 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
476 return "Test stop due to flush after max-1 reqs";
477 case TEST_STOP_DUE_TO_READ:
478 return "Test stop due to read";
479 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
480 return "Test stop due to read after max-1 reqs";
481 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
482 return "Test stop due to empty queue";
483 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
484 return "Test stop due to max req num";
485 case TEST_STOP_DUE_TO_THRESHOLD:
486 return "Test stop due to exceeding threshold";
487 case TEST_RET_ABORT:
488 return "Test err_check return abort";
489 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
490 return "Test err_check return partial followed by success";
491 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
492 return "Test err_check return partial followed by abort";
493 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
494 return "Test err_check return partial multiple until success";
495 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
496 return "Test err_check return partial max fail index";
497 case TEST_RET_RETRY:
498 return "Test err_check return retry";
499 case TEST_RET_CMD_ERR:
500 return "Test err_check return cmd error";
501 case TEST_RET_DATA_ERR:
502 return "Test err_check return data error";
503 case TEST_HDR_INVALID_VERSION:
504 return "Test invalid - wrong header version";
505 case TEST_HDR_WRONG_WRITE_CODE:
506 return "Test invalid - wrong write code";
507 case TEST_HDR_INVALID_RW_CODE:
508 return "Test invalid - wrong R/W code";
509 case TEST_HDR_DIFFERENT_ADDRESSES:
510 return "Test invalid - header different addresses";
511 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
512 return "Test invalid - header req num smaller than actual";
513 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
514 return "Test invalid - header req num larger than actual";
515 case TEST_HDR_CMD23_PACKED_BIT_SET:
516 return "Test invalid - header cmd23 packed bit set";
517 case TEST_CMD23_MAX_PACKED_WRITES:
518 return "Test invalid - cmd23 max packed writes";
519 case TEST_CMD23_ZERO_PACKED_WRITES:
520 return "Test invalid - cmd23 zero packed writes";
521 case TEST_CMD23_PACKED_BIT_UNSET:
522 return "Test invalid - cmd23 packed bit unset";
523 case TEST_CMD23_REL_WR_BIT_SET:
524 return "Test invalid - cmd23 rel wr bit set";
525 case TEST_CMD23_BITS_16TO29_SET:
526 return "Test invalid - cmd23 bits [16-29] set";
527 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
528 return "Test invalid - cmd23 header block not in count";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200529 case TEST_PACKING_EXP_N_OVER_TRIGGER:
530 return "\nTest packing control - pack n";
531 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
532 return "\nTest packing control - pack n followed by read";
533 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
534 return "\nTest packing control - pack n followed by flush";
535 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
536 return "\nTest packing control - pack one followed by read";
537 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
538 return "\nTest packing control - pack threshold";
539 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
540 return "\nTest packing control - no packing";
541 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
542 return "\nTest packing control - no packing, trigger requests";
543 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
544 return "\nTest packing control - no pack, trigger-read-trigger";
545 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
546 return "\nTest packing control- no pack, trigger-flush-trigger";
547 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
548 return "\nTest packing control - mix: pack -> no pack -> pack";
549 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
550 return "\nTest packing control - mix: no pack->pack->no pack";
Maya Erezddc55732012-10-17 09:51:01 +0200551 case TEST_WRITE_DISCARD_SANITIZE_READ:
552 return "\nTest write, discard, sanitize";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200553 case BKOPS_DELAYED_WORK_LEVEL_1:
554 return "\nTest delayed work BKOPS level 1";
555 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
556 return "\nTest delayed work BKOPS level 1 with HPI";
557 case BKOPS_CANCEL_DELAYED_WORK:
558 return "\nTest cancel delayed BKOPS work";
559 case BKOPS_URGENT_LEVEL_2:
560 return "\nTest urgent BKOPS level 2";
561 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
562 return "\nTest urgent BKOPS level 2, followed by a request";
563 case BKOPS_URGENT_LEVEL_3:
564 return "\nTest urgent BKOPS level 3";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200565 default:
566 return "Unknown testcase";
567 }
568
569 return NULL;
570}
571
572/*
573 * Compare individual testcase's statistics to the expected statistics:
574 * Compare stop reason and number of packing events
575 */
576static int check_wr_packing_statistics(struct test_data *td)
577{
578 struct mmc_wr_pack_stats *mmc_packed_stats;
579 struct mmc_queue *mq = td->req_q->queuedata;
580 int max_packed_reqs = mq->card->ext_csd.max_packed_writes;
581 int i;
582 struct mmc_card *card = mq->card;
583 struct mmc_wr_pack_stats expected_stats;
584 int *stop_reason;
585 int ret = 0;
586
587 if (!mq) {
588 test_pr_err("%s: NULL mq", __func__);
589 return -EINVAL;
590 }
591
592 expected_stats = mbtd->exp_packed_stats;
593
594 mmc_packed_stats = mmc_blk_get_packed_statistics(card);
595 if (!mmc_packed_stats) {
596 test_pr_err("%s: NULL mmc_packed_stats", __func__);
597 return -EINVAL;
598 }
599
600 if (!mmc_packed_stats->packing_events) {
601 test_pr_err("%s: NULL packing_events", __func__);
602 return -EINVAL;
603 }
604
605 spin_lock(&mmc_packed_stats->lock);
606
607 if (!mmc_packed_stats->enabled) {
608 test_pr_err("%s write packing statistics are not enabled",
609 __func__);
610 ret = -EINVAL;
611 goto exit_err;
612 }
613
614 stop_reason = mmc_packed_stats->pack_stop_reason;
615
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200616 for (i = 1; i <= max_packed_reqs; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200617 if (mmc_packed_stats->packing_events[i] !=
618 expected_stats.packing_events[i]) {
619 test_pr_err(
620 "%s: Wrong pack stats in index %d, got %d, expected %d",
621 __func__, i, mmc_packed_stats->packing_events[i],
622 expected_stats.packing_events[i]);
623 if (td->fs_wr_reqs_during_test)
624 goto cancel_round;
625 ret = -EINVAL;
626 goto exit_err;
627 }
628 }
629
630 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
631 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
632 test_pr_err(
633 "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
634 __func__, stop_reason[EXCEEDS_SEGMENTS],
635 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
636 if (td->fs_wr_reqs_during_test)
637 goto cancel_round;
638 ret = -EINVAL;
639 goto exit_err;
640 }
641
642 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
643 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
644 test_pr_err(
645 "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
646 __func__, stop_reason[EXCEEDS_SECTORS],
647 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
648 if (td->fs_wr_reqs_during_test)
649 goto cancel_round;
650 ret = -EINVAL;
651 goto exit_err;
652 }
653
654 if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
655 expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
656 test_pr_err(
657 "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
658 __func__, stop_reason[WRONG_DATA_DIR],
659 expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
660 if (td->fs_wr_reqs_during_test)
661 goto cancel_round;
662 ret = -EINVAL;
663 goto exit_err;
664 }
665
666 if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
667 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
668 test_pr_err(
669 "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
670 __func__, stop_reason[FLUSH_OR_DISCARD],
671 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
672 if (td->fs_wr_reqs_during_test)
673 goto cancel_round;
674 ret = -EINVAL;
675 goto exit_err;
676 }
677
678 if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
679 expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
680 test_pr_err(
681 "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
682 __func__, stop_reason[EMPTY_QUEUE],
683 expected_stats.pack_stop_reason[EMPTY_QUEUE]);
684 if (td->fs_wr_reqs_during_test)
685 goto cancel_round;
686 ret = -EINVAL;
687 goto exit_err;
688 }
689
690 if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
691 expected_stats.pack_stop_reason[REL_WRITE]) {
692 test_pr_err(
693 "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
694 __func__, stop_reason[REL_WRITE],
695 expected_stats.pack_stop_reason[REL_WRITE]);
696 if (td->fs_wr_reqs_during_test)
697 goto cancel_round;
698 ret = -EINVAL;
699 goto exit_err;
700 }
701
702exit_err:
703 spin_unlock(&mmc_packed_stats->lock);
704 if (ret && mmc_packed_stats->enabled)
705 print_mmc_packing_stats(card);
706 return ret;
707cancel_round:
708 spin_unlock(&mmc_packed_stats->lock);
709 test_iosched_set_ignore_round(true);
710 return 0;
711}
712
/*
 * Pseudo-randomly advance *seed_number with a linear-congruential step and
 * derive a value from it in the range [min_val, max_val - 1] (results below
 * min_val are clamped up to min_val).
 *
 * Returns 0 if seed_number is NULL, and min_val if max_val is 0 (the
 * unguarded "% max_val" was previously a division by zero / UB).
 */
/* Fallback definitions so this helper also compiles standalone (e.g. for
 * unit testing); in this file the macros are already defined above. */
#ifndef LARGE_PRIME_1
#define LARGE_PRIME_1 1103515367
#endif
#ifndef LARGE_PRIME_2
#define LARGE_PRIME_2 35757
#endif
static unsigned int pseudo_random_seed(unsigned int *seed_number,
				       unsigned int min_val,
				       unsigned int max_val)
{
	unsigned int ret;	/* was int; the value is an unsigned modulo */

	if (!seed_number)
		return 0;

	if (!max_val)
		return min_val;

	*seed_number = ((unsigned int)(((unsigned long)*seed_number *
			(unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
	ret = *seed_number % max_val;

	return (ret > min_val ? ret : min_val);
}
732
733/*
734 * Given a pseudo-random seed, find a pseudo-random num_of_bios.
735 * Make sure that num_of_bios is not larger than TEST_MAX_SECTOR_RANGE
736 */
737static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
738 unsigned int *num_of_bios)
739{
740 do {
741 *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
742 TEST_MAX_BIOS_PER_REQ);
743 if (!(*num_of_bios))
744 *num_of_bios = 1;
745 } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
746}
747
748/* Add a single read request to the given td's request queue */
749static int prepare_request_add_read(struct test_data *td)
750{
751 int ret;
752 int start_sec;
753
754 if (td)
755 start_sec = td->start_sector;
756 else {
757 test_pr_err("%s: NULL td", __func__);
758 return 0;
759 }
760
761 test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
762 td->wr_rd_next_req_id);
763
764 ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
765 TEST_PATTERN_5A, NULL);
766 if (ret) {
767 test_pr_err("%s: failed to add a read request", __func__);
768 return ret;
769 }
770
771 return 0;
772}
773
774/* Add a single flush request to the given td's request queue */
775static int prepare_request_add_flush(struct test_data *td)
776{
777 int ret;
778
779 if (!td) {
780 test_pr_err("%s: NULL td", __func__);
781 return 0;
782 }
783
784 test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
785 td->unique_next_req_id);
786 ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
787 0, 0, NULL);
788 if (ret) {
789 test_pr_err("%s: failed to add a flush request", __func__);
790 return ret;
791 }
792
793 return ret;
794}
795
796/*
797 * Add num_requets amount of write requests to the given td's request queue.
798 * If random test mode is chosen we pseudo-randomly choose the number of bios
799 * for each write request, otherwise add between 1 to 5 bio per request.
800 */
801static int prepare_request_add_write_reqs(struct test_data *td,
802 int num_requests, int is_err_expected,
803 int is_random)
804{
805 int i;
806 unsigned int start_sec;
807 int num_bios;
808 int ret = 0;
809 unsigned int *bio_seed = &mbtd->random_test_seed;
810
811 if (td)
812 start_sec = td->start_sector;
813 else {
814 test_pr_err("%s: NULL td", __func__);
815 return ret;
816 }
817
818 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
819 num_requests, td->wr_rd_next_req_id);
820
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200821 for (i = 1; i <= num_requests; i++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200822 start_sec = td->start_sector + 4096 * td->num_of_write_bios;
823 if (is_random)
824 pseudo_rnd_num_of_bios(bio_seed, &num_bios);
825 else
826 /*
827 * For the non-random case, give num_bios a value
828 * between 1 and 5, to keep a small number of BIOs
829 */
830 num_bios = (i%5)+1;
831
832 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
833 start_sec, num_bios, TEST_PATTERN_5A, NULL);
834
835 if (ret) {
836 test_pr_err("%s: failed to add a write request",
837 __func__);
838 return ret;
839 }
840 }
841 return 0;
842}
843
844/*
845 * Prepare the write, read and flush requests for a generic packed commands
846 * testcase
847 */
848static int prepare_packed_requests(struct test_data *td, int is_err_expected,
849 int num_requests, int is_random)
850{
851 int ret = 0;
852 struct mmc_queue *mq;
853 int max_packed_reqs;
854 struct request_queue *req_q;
855
856 if (!td) {
857 pr_err("%s: NULL td", __func__);
858 return -EINVAL;
859 }
860
861 req_q = td->req_q;
862
863 if (!req_q) {
864 pr_err("%s: NULL request queue", __func__);
865 return -EINVAL;
866 }
867
868 mq = req_q->queuedata;
869 if (!mq) {
870 test_pr_err("%s: NULL mq", __func__);
871 return -EINVAL;
872 }
873
874 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
875
876 if (mbtd->random_test_seed <= 0) {
877 mbtd->random_test_seed =
878 (unsigned int)(get_jiffies_64() & 0xFFFF);
879 test_pr_info("%s: got seed from jiffies %d",
880 __func__, mbtd->random_test_seed);
881 }
882
883 mmc_blk_init_packed_statistics(mq->card);
884
885 ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
886 is_random);
887 if (ret)
888 return ret;
889
890 /* Avoid memory corruption in upcoming stats set */
891 if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
892 num_requests--;
893
894 memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
895 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
896 memset(mbtd->exp_packed_stats.packing_events, 0,
897 (max_packed_reqs + 1) * sizeof(u32));
898 if (num_requests <= max_packed_reqs)
899 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
900
901 switch (td->test_info.testcase) {
902 case TEST_STOP_DUE_TO_FLUSH:
903 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
904 ret = prepare_request_add_flush(td);
905 if (ret)
906 return ret;
907
908 mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
909 break;
910 case TEST_STOP_DUE_TO_READ:
911 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
912 ret = prepare_request_add_read(td);
913 if (ret)
914 return ret;
915
916 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
917 break;
918 case TEST_STOP_DUE_TO_THRESHOLD:
919 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
920 mbtd->exp_packed_stats.packing_events[1] = 1;
921 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
922 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
923 break;
924 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
925 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
926 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
927 break;
928 default:
929 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
930 }
931 mbtd->num_requests = num_requests;
932
933 return 0;
934}
935
936/*
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200937 * Prepare the write, read and flush requests for the packing control
938 * testcases
939 */
static int prepare_packed_control_tests_requests(struct test_data *td,
			int is_err_expected, int num_requests, int is_random)
{
	int ret = 0;
	struct mmc_queue *mq;
	int max_packed_reqs;
	/* Saved copy of num_requests for testcases that temporarily shrink it */
	int temp_num_req = num_requests;
	struct request_queue *req_q;
	/* Number of write reqs after which packing is expected to kick in */
	int test_packed_trigger;
	/* Requests expected to end up inside a packed command */
	int num_packed_reqs;

	if (!td) {
		test_pr_err("%s: NULL td\n", __func__);
		return -EINVAL;
	}

	req_q = td->req_q;

	if (!req_q) {
		test_pr_err("%s: NULL request queue\n", __func__);
		return -EINVAL;
	}

	mq = req_q->queuedata;
	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
	num_packed_reqs = num_requests - test_packed_trigger;

	/* Lazily seed the PRNG from jiffies if no seed was supplied */
	if (mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			__func__, mbtd->random_test_seed);
	}

	mmc_blk_init_packed_statistics(mq->card);

	/*
	 * For the no-packed/packed/no-packed mix, issue only trigger-1 writes
	 * in the first phase; the full count is restored after they are added.
	 */
	if (td->test_info.testcase ==
			TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
		temp_num_req = num_requests;
		num_requests = test_packed_trigger - 1;
	}

	/* Verify that the packing is disabled before starting the test */
	mq->wr_packing_enabled = false;
	mq->num_of_potential_packed_wr_reqs = 0;

	/*
	 * For the packed/no-packed/packed mix, start with packing already
	 * enabled and past the trigger.
	 */
	if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
		mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
		mq->wr_packing_enabled = true;
		num_requests = test_packed_trigger + 2;
	}

	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
					     is_random);
	if (ret)
		goto exit;

	if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
		num_requests = temp_num_req;

	/* Reset expected statistics before setting per-testcase expectations */
	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
	memset(mbtd->exp_packed_stats.packing_events, 0,
		(max_packed_reqs + 1) * sizeof(u32));

	/*
	 * Add the follow-up requests (read/flush/extra writes) and record
	 * the packing-stop reasons and packing-event counters each testcase
	 * is expected to produce.
	 */
	switch (td->test_info.testcase) {
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
		/* A read after the writes stops packing with WRONG_DATA_DIR */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
		/* Flush splits the writes into two packed batches */
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_packed_reqs,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
		/* read breaks the packing, then two more write phases */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
		mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		break;
	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
		/* read / packed writes / read / below-trigger writes */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	default:
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
	}
	mbtd->num_requests = num_requests;

exit:
	return ret;
}
1114
1115/*
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001116 * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
1117 * In this testcase we have mixed error expectations from different
1118 * write requests, hence the special prepare function.
1119 */
1120static int prepare_partial_followed_by_abort(struct test_data *td,
1121 int num_requests)
1122{
1123 int i, start_address;
1124 int is_err_expected = 0;
1125 int ret = 0;
1126 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1127 int max_packed_reqs;
1128
1129 if (!mq) {
1130 test_pr_err("%s: NULL mq", __func__);
1131 return -EINVAL;
1132 }
1133
1134 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1135
1136 mmc_blk_init_packed_statistics(mq->card);
1137
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001138 for (i = 1; i <= num_requests; i++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001139 if (i > (num_requests / 2))
1140 is_err_expected = 1;
1141
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001142 start_address = td->start_sector + 4096 * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001143 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001144 start_address, (i % 5) + 1, TEST_PATTERN_5A,
1145 NULL);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001146 if (ret) {
1147 test_pr_err("%s: failed to add a write request",
1148 __func__);
1149 return ret;
1150 }
1151 }
1152
1153 memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
1154 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1155 memset(mbtd->exp_packed_stats.packing_events, 0,
1156 (max_packed_reqs + 1) * sizeof(u32));
1157 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1158 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1159
1160 mbtd->num_requests = num_requests;
1161
1162 return ret;
1163}
1164
1165/*
1166 * Get number of write requests for current testcase. If random test mode was
1167 * chosen, pseudo-randomly choose the number of requests, otherwise set to
1168 * two less than the packing threshold.
1169 */
1170static int get_num_requests(struct test_data *td)
1171{
1172 int *seed = &mbtd->random_test_seed;
1173 struct request_queue *req_q;
1174 struct mmc_queue *mq;
1175 int max_num_requests;
1176 int num_requests;
1177 int min_num_requests = 2;
1178 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001179 int max_for_double;
1180 int test_packed_trigger;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001181
1182 req_q = test_iosched_get_req_queue();
1183 if (req_q)
1184 mq = req_q->queuedata;
1185 else {
1186 test_pr_err("%s: NULL request queue", __func__);
1187 return 0;
1188 }
1189
1190 if (!mq) {
1191 test_pr_err("%s: NULL mq", __func__);
1192 return -EINVAL;
1193 }
1194
1195 max_num_requests = mq->card->ext_csd.max_packed_writes;
1196 num_requests = max_num_requests - 2;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001197 test_packed_trigger = mq->num_wr_reqs_to_start_packing;
1198
1199 /*
1200 * Here max_for_double is intended for packed control testcases
1201 * in which we issue many write requests. It's purpose is to prevent
1202 * exceeding max number of req_queue requests.
1203 */
1204 max_for_double = max_num_requests - 10;
1205
1206 if (td->test_info.testcase ==
1207 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1208 /* Don't expect packing, so issue up to trigger-1 reqs */
1209 num_requests = test_packed_trigger - 1;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001210
1211 if (is_random) {
1212 if (td->test_info.testcase ==
1213 TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001214 /*
1215 * Here we don't want num_requests to be less than 1
1216 * as a consequence of division by 2.
1217 */
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001218 min_num_requests = 3;
1219
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001220 if (td->test_info.testcase ==
1221 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1222 /* Don't expect packing, so issue up to trigger reqs */
1223 max_num_requests = test_packed_trigger;
1224
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001225 num_requests = pseudo_random_seed(seed, min_num_requests,
1226 max_num_requests - 1);
1227 }
1228
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001229 if (td->test_info.testcase ==
1230 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1231 num_requests -= test_packed_trigger;
1232
1233 if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
1234 num_requests =
1235 num_requests > max_for_double ? max_for_double : num_requests;
1236
1237 if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
1238 num_requests += test_packed_trigger;
1239
1240 if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
1241 num_requests = test_packed_trigger;
1242
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001243 return num_requests;
1244}
1245
1246/*
1247 * An implementation for the prepare_test_fn pointer in the test_info
1248 * data structure. According to the testcase we add the right number of requests
1249 * and decide if an error is expected or not.
1250 */
1251static int prepare_test(struct test_data *td)
1252{
1253 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1254 int max_num_requests;
1255 int num_requests = 0;
1256 int ret = 0;
1257 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001258 int test_packed_trigger = mq->num_wr_reqs_to_start_packing;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001259
1260 if (!mq) {
1261 test_pr_err("%s: NULL mq", __func__);
1262 return -EINVAL;
1263 }
1264
1265 max_num_requests = mq->card->ext_csd.max_packed_writes;
1266
1267 if (is_random && mbtd->random_test_seed == 0) {
1268 mbtd->random_test_seed =
1269 (unsigned int)(get_jiffies_64() & 0xFFFF);
1270 test_pr_info("%s: got seed from jiffies %d",
1271 __func__, mbtd->random_test_seed);
1272 }
1273
1274 num_requests = get_num_requests(td);
1275
1276 if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
1277 mq->packed_test_fn =
1278 test_invalid_packed_cmd;
1279
1280 if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
1281 mq->err_check_fn = test_err_check;
1282
1283 switch (td->test_info.testcase) {
1284 case TEST_STOP_DUE_TO_FLUSH:
1285 case TEST_STOP_DUE_TO_READ:
1286 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
1287 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
1288 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
1289 case TEST_CMD23_PACKED_BIT_UNSET:
1290 ret = prepare_packed_requests(td, 0, num_requests, is_random);
1291 break;
1292 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1293 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1294 ret = prepare_packed_requests(td, 0, max_num_requests - 1,
1295 is_random);
1296 break;
1297 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
1298 ret = prepare_partial_followed_by_abort(td, num_requests);
1299 break;
1300 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1301 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1302 ret = prepare_packed_requests(td, 0, max_num_requests,
1303 is_random);
1304 break;
1305 case TEST_STOP_DUE_TO_THRESHOLD:
1306 ret = prepare_packed_requests(td, 0, max_num_requests + 1,
1307 is_random);
1308 break;
1309 case TEST_RET_ABORT:
1310 case TEST_RET_RETRY:
1311 case TEST_RET_CMD_ERR:
1312 case TEST_RET_DATA_ERR:
1313 case TEST_HDR_INVALID_VERSION:
1314 case TEST_HDR_WRONG_WRITE_CODE:
1315 case TEST_HDR_INVALID_RW_CODE:
1316 case TEST_HDR_DIFFERENT_ADDRESSES:
1317 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
1318 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
1319 case TEST_CMD23_MAX_PACKED_WRITES:
1320 case TEST_CMD23_ZERO_PACKED_WRITES:
1321 case TEST_CMD23_REL_WR_BIT_SET:
1322 case TEST_CMD23_BITS_16TO29_SET:
1323 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
1324 case TEST_HDR_CMD23_PACKED_BIT_SET:
1325 ret = prepare_packed_requests(td, 1, num_requests, is_random);
1326 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001327 case TEST_PACKING_EXP_N_OVER_TRIGGER:
1328 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
1329 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
1330 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
1331 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
1332 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
1333 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1334 is_random);
1335 break;
1336 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
1337 ret = prepare_packed_control_tests_requests(td, 0,
1338 max_num_requests, is_random);
1339 break;
1340 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
1341 ret = prepare_packed_control_tests_requests(td, 0,
1342 test_packed_trigger + 1,
1343 is_random);
1344 break;
1345 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
1346 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1347 is_random);
1348 break;
1349 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
1350 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
1351 ret = prepare_packed_control_tests_requests(td, 0,
1352 test_packed_trigger, is_random);
1353 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001354 default:
1355 test_pr_info("%s: Invalid test case...", __func__);
1356 return -EINVAL;
1357 }
1358
1359 return ret;
1360}
1361
1362/*
1363 * An implementation for the post_test_fn in the test_info data structure.
1364 * In our case we just reset the function pointers in the mmc_queue in order for
1365 * the FS to be able to dispatch it's requests correctly after the test is
1366 * finished.
1367 */
1368static int post_test(struct test_data *td)
1369{
1370 struct mmc_queue *mq;
1371
1372 if (!td)
1373 return -EINVAL;
1374
1375 mq = td->req_q->queuedata;
1376
1377 if (!mq) {
1378 test_pr_err("%s: NULL mq", __func__);
1379 return -EINVAL;
1380 }
1381
1382 mq->packed_test_fn = NULL;
1383 mq->err_check_fn = NULL;
1384
1385 return 0;
1386}
1387
1388/*
1389 * This function checks, based on the current test's test_group, that the
1390 * packed commands capability and control are set right. In addition, we check
1391 * if the card supports the packed command feature.
1392 */
1393static int validate_packed_commands_settings(void)
1394{
1395 struct request_queue *req_q;
1396 struct mmc_queue *mq;
1397 int max_num_requests;
1398 struct mmc_host *host;
1399
1400 req_q = test_iosched_get_req_queue();
1401 if (!req_q) {
1402 test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
1403 test_iosched_set_test_result(TEST_FAILED);
1404 return -EINVAL;
1405 }
1406
1407 mq = req_q->queuedata;
1408 if (!mq) {
1409 test_pr_err("%s: NULL mq", __func__);
1410 return -EINVAL;
1411 }
1412
1413 max_num_requests = mq->card->ext_csd.max_packed_writes;
1414 host = mq->card->host;
1415
1416 if (!(host->caps2 && MMC_CAP2_PACKED_WR)) {
1417 test_pr_err("%s: Packed Write capability disabled, exit test",
1418 __func__);
1419 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1420 return -EINVAL;
1421 }
1422
1423 if (max_num_requests == 0) {
1424 test_pr_err(
1425 "%s: no write packing support, ext_csd.max_packed_writes=%d",
1426 __func__, mq->card->ext_csd.max_packed_writes);
1427 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1428 return -EINVAL;
1429 }
1430
1431 test_pr_info("%s: max number of packed requests supported is %d ",
1432 __func__, max_num_requests);
1433
1434 switch (mbtd->test_group) {
1435 case TEST_SEND_WRITE_PACKING_GROUP:
1436 case TEST_ERR_CHECK_GROUP:
1437 case TEST_SEND_INVALID_GROUP:
1438 /* disable the packing control */
1439 host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
1440 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001441 case TEST_PACKING_CONTROL_GROUP:
1442 host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
1443 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001444 default:
1445 break;
1446 }
1447
1448 return 0;
1449}
1450
Maya Erezddc55732012-10-17 09:51:01 +02001451static void pseudo_rnd_sector_and_size(unsigned int *seed,
1452 unsigned int min_start_sector,
1453 unsigned int *start_sector,
1454 unsigned int *num_of_bios)
1455{
1456 unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
1457 do {
1458 *start_sector = pseudo_random_seed(seed,
1459 1, max_sec);
1460 *num_of_bios = pseudo_random_seed(seed,
1461 1, TEST_MAX_BIOS_PER_REQ);
1462 if (!(*num_of_bios))
1463 *num_of_bios = 1;
1464 } while ((*start_sector < min_start_sector) ||
1465 (*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
1466}
1467
1468/* sanitize test functions */
1469static int prepare_write_discard_sanitize_read(struct test_data *td)
1470{
1471 unsigned int start_sector;
1472 unsigned int num_of_bios = 0;
1473 static unsigned int total_bios;
1474 unsigned int *num_bios_seed;
1475 int i = 0;
1476
1477 if (mbtd->random_test_seed == 0) {
1478 mbtd->random_test_seed =
1479 (unsigned int)(get_jiffies_64() & 0xFFFF);
1480 test_pr_info("%s: got seed from jiffies %d",
1481 __func__, mbtd->random_test_seed);
1482 }
1483 num_bios_seed = &mbtd->random_test_seed;
1484
1485 do {
1486 pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
1487 &start_sector, &num_of_bios);
1488
1489 /* DISCARD */
1490 total_bios += num_of_bios;
1491 test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
1492 __func__, td->unique_next_req_id, start_sector,
1493 num_of_bios);
1494 test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
1495 start_sector, BIO_TO_SECTOR(num_of_bios),
1496 NULL);
1497
1498 } while (++i < (BLKDEV_MAX_RQ-10));
1499
1500 test_pr_info("%s: total discard bios = %d", __func__, total_bios);
1501
1502 test_pr_info("%s: add sanitize req", __func__);
1503 test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);
1504
1505 return 0;
1506}
1507
Yaniv Gardie9214c82012-10-18 13:58:18 +02001508/*
1509 * Post test operations for BKOPs test
1510 * Disable the BKOPs statistics and clear the feature flags
1511 */
1512static int bkops_post_test(struct test_data *td)
1513{
1514 struct request_queue *q = td->req_q;
1515 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1516 struct mmc_card *card = mq->card;
1517
1518 mmc_card_clr_doing_bkops(mq->card);
1519 card->ext_csd.raw_bkops_status = 0;
1520
1521 spin_lock(&card->bkops_info.bkops_stats.lock);
1522 card->bkops_info.bkops_stats.enabled = false;
1523 spin_unlock(&card->bkops_info.bkops_stats.lock);
1524
1525 return 0;
1526}
1527
1528/*
1529 * Verify the BKOPs statsistics
1530 */
static int check_bkops_result(struct test_data *td)
{
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		goto fail;

	bkops_stat = &card->bkops_info.bkops_stats;

	test_pr_info("%s: Test results: bkops:(%d,%d,%d) hpi:%d, suspend:%d",
			__func__,
			bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX],
			bkops_stat->hpi,
			bkops_stat->suspend);

	/*
	 * Each testcase expects an exact combination of severity-level
	 * counters, HPI count and suspend count; anything else fails.
	 */
	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* one level-1 bkops, entered suspend, no HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 1) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
		break;
	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* one level-1 bkops interrupted by HPI, no suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 1))
			goto exit;
		else
			goto fail;
		break;
	case BKOPS_CANCEL_DELAYED_WORK:
		/* the delayed work was cancelled: nothing should have run */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 0) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* one urgent level-2 bkops, no suspend, no HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_3:
		/* one urgent level-3 bkops, no suspend, no HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	default:
		return -EINVAL;
	}

exit:
	return 0;
fail:
	/* FS writes during the test invalidate the round rather than fail it */
	if (td->fs_wr_reqs_during_test) {
		test_pr_info("%s: wr reqs during test, cancel the round",
			     __func__);
		test_iosched_set_ignore_round(true);
		return 0;
	}

	test_pr_info("%s: BKOPs statistics are not as expected, test failed",
		     __func__);
	return -EINVAL;
}
1610
1611static void bkops_end_io_final_fn(struct request *rq, int err)
1612{
1613 struct test_request *test_rq =
1614 (struct test_request *)rq->elv.priv[0];
1615 BUG_ON(!test_rq);
1616
1617 test_rq->req_completed = 1;
1618 test_rq->req_result = err;
1619
1620 test_pr_info("%s: request %d completed, err=%d",
1621 __func__, test_rq->req_id, err);
1622
1623 mbtd->bkops_stage = BKOPS_STAGE_4;
1624 wake_up(&mbtd->bkops_wait_q);
1625}
1626
1627static void bkops_end_io_fn(struct request *rq, int err)
1628{
1629 struct test_request *test_rq =
1630 (struct test_request *)rq->elv.priv[0];
1631 BUG_ON(!test_rq);
1632
1633 test_rq->req_completed = 1;
1634 test_rq->req_result = err;
1635
1636 test_pr_info("%s: request %d completed, err=%d",
1637 __func__, test_rq->req_id, err);
1638 mbtd->bkops_stage = BKOPS_STAGE_2;
1639 wake_up(&mbtd->bkops_wait_q);
1640
1641}
1642
1643static int prepare_bkops(struct test_data *td)
1644{
1645 int ret = 0;
1646 struct request_queue *q = td->req_q;
1647 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1648 struct mmc_card *card = mq->card;
1649 struct mmc_bkops_stats *bkops_stat;
1650
1651 if (!card)
1652 return -EINVAL;
1653
1654 bkops_stat = &card->bkops_info.bkops_stats;
1655
1656 if (!card->ext_csd.bkops_en) {
1657 test_pr_err("%s: BKOPS is not enabled by card or host)",
1658 __func__);
1659 return -ENOTSUPP;
1660 }
1661 if (mmc_card_doing_bkops(card)) {
1662 test_pr_err("%s: BKOPS in progress, try later", __func__);
1663 return -EAGAIN;
1664 }
1665
1666 mmc_blk_init_bkops_statistics(card);
1667
1668 if ((mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2) ||
1669 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2_TWO_REQS) ||
1670 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_3))
1671 mq->err_check_fn = test_err_check;
1672 mbtd->err_check_counter = 0;
1673
1674 return ret;
1675}
1676
/*
 * Drive the BKOPs testcases: fake the card's raw BKOPs status, issue the
 * test writes, run the queue and wait for the bkops_end_io_* completion
 * callbacks to advance mbtd->bkops_stage through the expected stages.
 */
static int run_bkops(struct test_data *td)
{
	int ret = 0;
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		return -EINVAL;

	bkops_stat = &card->bkops_info.bkops_stats;

	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* Fake level-1 status and enough changed sectors to queue
		 * the delayed BKOPs work, then let the host idle/suspend. */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		/* this long sleep makes sure the host starts bkops and
		   also, gets into suspend */
		msleep(10000);

		bkops_stat->ignore_card_bkops_status = false;
		card->ext_csd.raw_bkops_status = 0;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* As above, but a write issued after the delay should
		 * interrupt the ongoing BKOPs with HPI. */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		msleep(card->bkops_info.delay_ms);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);
		/* wait for bkops_end_io_final_fn to reach stage 4 */
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_CANCEL_DELAYED_WORK:
		/* A write issued right away should cancel the queued
		 * delayed BKOPs work before it runs. */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
		/* Fake urgent status (level 2 or 3): the first write should
		 * trigger BKOPs (stage 2), then a second write completes the
		 * test (stage 4). */
		bkops_stat->ignore_card_bkops_status = true;
		if (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2)
			card->ext_csd.raw_bkops_status = 2;
		else
			card->ext_csd.raw_bkops_status = 3;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* Same as level 2, but with two queued writes (packing
		 * disabled so they are dispatched separately). */
		mq->wr_packing_enabled = false;
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 2;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      NULL);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		/* dispatch from the head of the test queue this time */
		td->next_req = list_entry(td->test_queue.next,
					  struct test_request, queuelist);
		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();

		break;
	default:
		test_pr_err("%s: wrong testcase: %d", __func__,
			    mbtd->test_info.testcase);
		ret = -EINVAL;
	}
	return ret;
}
1890
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001891static bool message_repeat;
1892static int test_open(struct inode *inode, struct file *file)
1893{
1894 file->private_data = inode->i_private;
1895 message_repeat = 1;
1896 return 0;
1897}
1898
1899/* send_packing TEST */
/*
 * debugfs write handler: runs the send_write_packing test group.
 * The written value is the number of cycles; each cycle runs every
 * testcase in the group once in random and once in non-random mode.
 */
static ssize_t send_write_packing_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;	/* requested number of cycles, default 1 */
	int j = 0;

	test_pr_info("%s: -- send_write_packing TEST --", __func__);

	/*
	 * NOTE(review): buf is a __user pointer parsed directly with
	 * sscanf(), with no copy_from_user() and no return-value check -
	 * confirm this is acceptable for this debug-only module.
	 */
	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;


	mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;

	/* Bail out (reporting via test result) if packing is unsupported */
	if (validate_packed_commands_settings())
		return count;

	if (mbtd->random_test_seed > 0)
		test_pr_info("%s: Test seed: %d", __func__,
			      mbtd->random_test_seed);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	/* Wire up the test-iosched callbacks for this test group */
	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.post_test_fn = post_test;

	for (i = 0; i < number; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
		      j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {

			/* each testcase runs once random, once deterministic */
			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
1963
1964static ssize_t send_write_packing_test_read(struct file *file,
1965 char __user *buffer,
1966 size_t count,
1967 loff_t *offset)
1968{
1969 memset((void *)buffer, 0, count);
1970
1971 snprintf(buffer, count,
1972 "\nsend_write_packing_test\n"
1973 "=========\n"
1974 "Description:\n"
1975 "This test checks the following scenarios\n"
1976 "- Pack due to FLUSH message\n"
1977 "- Pack due to FLUSH after threshold writes\n"
1978 "- Pack due to READ message\n"
1979 "- Pack due to READ after threshold writes\n"
1980 "- Pack due to empty queue\n"
1981 "- Pack due to threshold writes\n"
1982 "- Pack due to one over threshold writes\n");
1983
1984 if (message_repeat == 1) {
1985 message_repeat = 0;
1986 return strnlen(buffer, count);
1987 } else {
1988 return 0;
1989 }
1990}
1991
1992const struct file_operations send_write_packing_test_ops = {
1993 .open = test_open,
1994 .write = send_write_packing_test_write,
1995 .read = send_write_packing_test_read,
1996};
1997
1998/* err_check TEST */
1999static ssize_t err_check_test_write(struct file *file,
2000 const char __user *buf,
2001 size_t count,
2002 loff_t *ppos)
2003{
2004 int ret = 0;
2005 int i = 0;
2006 int number = -1;
2007 int j = 0;
2008
2009 test_pr_info("%s: -- err_check TEST --", __func__);
2010
2011 sscanf(buf, "%d", &number);
2012
2013 if (number <= 0)
2014 number = 1;
2015
2016 mbtd->test_group = TEST_ERR_CHECK_GROUP;
2017
2018 if (validate_packed_commands_settings())
2019 return count;
2020
2021 if (mbtd->random_test_seed > 0)
2022 test_pr_info("%s: Test seed: %d", __func__,
2023 mbtd->random_test_seed);
2024
2025 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2026
2027 mbtd->test_info.data = mbtd;
2028 mbtd->test_info.prepare_test_fn = prepare_test;
2029 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2030 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2031 mbtd->test_info.post_test_fn = post_test;
2032
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002033 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002034 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2035 test_pr_info("%s: ====================", __func__);
2036
2037 for (j = ERR_CHECK_MIN_TESTCASE;
2038 j <= ERR_CHECK_MAX_TESTCASE ; j++) {
2039 mbtd->test_info.testcase = j;
2040 mbtd->is_random = RANDOM_TEST;
2041 ret = test_iosched_start_test(&mbtd->test_info);
2042 if (ret)
2043 break;
2044 /* Allow FS requests to be dispatched */
2045 msleep(1000);
2046 mbtd->test_info.testcase = j;
2047 mbtd->is_random = NON_RANDOM_TEST;
2048 ret = test_iosched_start_test(&mbtd->test_info);
2049 if (ret)
2050 break;
2051 /* Allow FS requests to be dispatched */
2052 msleep(1000);
2053 }
2054 }
2055
2056 test_pr_info("%s: Completed all the test cases.", __func__);
2057
2058 return count;
2059}
2060
2061static ssize_t err_check_test_read(struct file *file,
2062 char __user *buffer,
2063 size_t count,
2064 loff_t *offset)
2065{
2066 memset((void *)buffer, 0, count);
2067
2068 snprintf(buffer, count,
2069 "\nerr_check_TEST\n"
2070 "=========\n"
2071 "Description:\n"
2072 "This test checks the following scenarios\n"
2073 "- Return ABORT\n"
2074 "- Return PARTIAL followed by success\n"
2075 "- Return PARTIAL followed by abort\n"
2076 "- Return PARTIAL multiple times until success\n"
2077 "- Return PARTIAL with fail index = threshold\n"
2078 "- Return RETRY\n"
2079 "- Return CMD_ERR\n"
2080 "- Return DATA_ERR\n");
2081
2082 if (message_repeat == 1) {
2083 message_repeat = 0;
2084 return strnlen(buffer, count);
2085 } else {
2086 return 0;
2087 }
2088}
2089
2090const struct file_operations err_check_test_ops = {
2091 .open = test_open,
2092 .write = err_check_test_write,
2093 .read = err_check_test_read,
2094};
2095
2096/* send_invalid_packed TEST */
2097static ssize_t send_invalid_packed_test_write(struct file *file,
2098 const char __user *buf,
2099 size_t count,
2100 loff_t *ppos)
2101{
2102 int ret = 0;
2103 int i = 0;
2104 int number = -1;
2105 int j = 0;
2106 int num_of_failures = 0;
2107
2108 test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
2109
2110 sscanf(buf, "%d", &number);
2111
2112 if (number <= 0)
2113 number = 1;
2114
2115 mbtd->test_group = TEST_SEND_INVALID_GROUP;
2116
2117 if (validate_packed_commands_settings())
2118 return count;
2119
2120 if (mbtd->random_test_seed > 0)
2121 test_pr_info("%s: Test seed: %d", __func__,
2122 mbtd->random_test_seed);
2123
2124 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2125
2126 mbtd->test_info.data = mbtd;
2127 mbtd->test_info.prepare_test_fn = prepare_test;
2128 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2129 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2130 mbtd->test_info.post_test_fn = post_test;
2131
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002132 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002133 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2134 test_pr_info("%s: ====================", __func__);
2135
2136 for (j = INVALID_CMD_MIN_TESTCASE;
2137 j <= INVALID_CMD_MAX_TESTCASE ; j++) {
2138
2139 mbtd->test_info.testcase = j;
2140 mbtd->is_random = RANDOM_TEST;
2141 ret = test_iosched_start_test(&mbtd->test_info);
2142 if (ret)
2143 num_of_failures++;
2144 /* Allow FS requests to be dispatched */
2145 msleep(1000);
2146
2147 mbtd->test_info.testcase = j;
2148 mbtd->is_random = NON_RANDOM_TEST;
2149 ret = test_iosched_start_test(&mbtd->test_info);
2150 if (ret)
2151 num_of_failures++;
2152 /* Allow FS requests to be dispatched */
2153 msleep(1000);
2154 }
2155 }
2156
2157 test_pr_info("%s: Completed all the test cases.", __func__);
2158
2159 if (num_of_failures > 0) {
2160 test_iosched_set_test_result(TEST_FAILED);
2161 test_pr_err(
2162 "There were %d failures during the test, TEST FAILED",
2163 num_of_failures);
2164 }
2165 return count;
2166}
2167
2168static ssize_t send_invalid_packed_test_read(struct file *file,
2169 char __user *buffer,
2170 size_t count,
2171 loff_t *offset)
2172{
2173 memset((void *)buffer, 0, count);
2174
2175 snprintf(buffer, count,
2176 "\nsend_invalid_packed_TEST\n"
2177 "=========\n"
2178 "Description:\n"
2179 "This test checks the following scenarios\n"
2180 "- Send an invalid header version\n"
2181 "- Send the wrong write code\n"
2182 "- Send an invalid R/W code\n"
2183 "- Send wrong start address in header\n"
2184 "- Send header with block_count smaller than actual\n"
2185 "- Send header with block_count larger than actual\n"
2186 "- Send header CMD23 packed bit set\n"
2187 "- Send CMD23 with block count over threshold\n"
2188 "- Send CMD23 with block_count equals zero\n"
2189 "- Send CMD23 packed bit unset\n"
2190 "- Send CMD23 reliable write bit set\n"
2191 "- Send CMD23 bits [16-29] set\n"
2192 "- Send CMD23 header block not in block_count\n");
2193
2194 if (message_repeat == 1) {
2195 message_repeat = 0;
2196 return strnlen(buffer, count);
2197 } else {
2198 return 0;
2199 }
2200}
2201
2202const struct file_operations send_invalid_packed_test_ops = {
2203 .open = test_open,
2204 .write = send_invalid_packed_test_write,
2205 .read = send_invalid_packed_test_read,
2206};
2207
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002208/* packing_control TEST */
2209static ssize_t write_packing_control_test_write(struct file *file,
2210 const char __user *buf,
2211 size_t count,
2212 loff_t *ppos)
2213{
2214 int ret = 0;
2215 int i = 0;
2216 int number = -1;
2217 int j = 0;
2218 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
2219 int max_num_requests = mq->card->ext_csd.max_packed_writes;
2220 int test_successful = 1;
2221
2222 test_pr_info("%s: -- write_packing_control TEST --", __func__);
2223
2224 sscanf(buf, "%d", &number);
2225
2226 if (number <= 0)
2227 number = 1;
2228
2229 test_pr_info("%s: max_num_requests = %d ", __func__,
2230 max_num_requests);
2231
2232 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2233 mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
2234
2235 if (validate_packed_commands_settings())
2236 return count;
2237
2238 mbtd->test_info.data = mbtd;
2239 mbtd->test_info.prepare_test_fn = prepare_test;
2240 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2241 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2242
2243 for (i = 0; i < number; ++i) {
2244 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2245 test_pr_info("%s: ====================", __func__);
2246
2247 for (j = PACKING_CONTROL_MIN_TESTCASE;
2248 j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
2249
2250 test_successful = 1;
2251 mbtd->test_info.testcase = j;
2252 mbtd->is_random = RANDOM_TEST;
2253 ret = test_iosched_start_test(&mbtd->test_info);
2254 if (ret) {
2255 test_successful = 0;
2256 break;
2257 }
2258 /* Allow FS requests to be dispatched */
2259 msleep(1000);
2260
2261 mbtd->test_info.testcase = j;
2262 mbtd->is_random = NON_RANDOM_TEST;
2263 ret = test_iosched_start_test(&mbtd->test_info);
2264 if (ret) {
2265 test_successful = 0;
2266 break;
2267 }
2268 /* Allow FS requests to be dispatched */
2269 msleep(1000);
2270 }
2271
2272 if (!test_successful)
2273 break;
2274 }
2275
2276 test_pr_info("%s: Completed all the test cases.", __func__);
2277
2278 return count;
2279}
2280
2281static ssize_t write_packing_control_test_read(struct file *file,
2282 char __user *buffer,
2283 size_t count,
2284 loff_t *offset)
2285{
2286 memset((void *)buffer, 0, count);
2287
2288 snprintf(buffer, count,
2289 "\nwrite_packing_control_test\n"
2290 "=========\n"
2291 "Description:\n"
2292 "This test checks the following scenarios\n"
2293 "- Packing expected - one over trigger\n"
2294 "- Packing expected - N over trigger\n"
2295 "- Packing expected - N over trigger followed by read\n"
2296 "- Packing expected - N over trigger followed by flush\n"
2297 "- Packing expected - threshold over trigger FB by flush\n"
2298 "- Packing not expected - less than trigger\n"
2299 "- Packing not expected - trigger requests\n"
2300 "- Packing not expected - trigger, read, trigger\n"
2301 "- Mixed state - packing -> no packing -> packing\n"
2302 "- Mixed state - no packing -> packing -> no packing\n");
2303
2304 if (message_repeat == 1) {
2305 message_repeat = 0;
2306 return strnlen(buffer, count);
2307 } else {
2308 return 0;
2309 }
2310}
2311
2312const struct file_operations write_packing_control_test_ops = {
2313 .open = test_open,
2314 .write = write_packing_control_test_write,
2315 .read = write_packing_control_test_read,
2316};
2317
Maya Erezddc55732012-10-17 09:51:01 +02002318static ssize_t write_discard_sanitize_test_write(struct file *file,
2319 const char __user *buf,
2320 size_t count,
2321 loff_t *ppos)
2322{
2323 int ret = 0;
2324 int i = 0;
2325 int number = -1;
2326
2327 sscanf(buf, "%d", &number);
2328 if (number <= 0)
2329 number = 1;
2330
2331 test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);
2332
2333 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2334
2335 mbtd->test_group = TEST_GENERAL_GROUP;
2336
2337 mbtd->test_info.data = mbtd;
2338 mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
2339 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2340 mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;
2341
2342 for (i = 0 ; i < number ; ++i) {
2343 test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
2344 test_pr_info("%s: ===================", __func__);
2345
2346 mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
2347 ret = test_iosched_start_test(&mbtd->test_info);
2348
2349 if (ret)
2350 break;
2351 }
2352
2353 return count;
2354}
2355
2356const struct file_operations write_discard_sanitize_test_ops = {
2357 .open = test_open,
2358 .write = write_discard_sanitize_test_write,
2359};
2360
Yaniv Gardie9214c82012-10-18 13:58:18 +02002361static ssize_t bkops_test_write(struct file *file,
2362 const char __user *buf,
2363 size_t count,
2364 loff_t *ppos)
2365{
2366 int ret = 0;
2367 int i = 0, j;
2368 int number = -1;
2369
2370 test_pr_info("%s: -- bkops_test TEST --", __func__);
2371
2372 sscanf(buf, "%d", &number);
2373
2374 if (number <= 0)
2375 number = 1;
2376
2377 mbtd->test_group = TEST_BKOPS_GROUP;
2378
2379 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2380
2381 mbtd->test_info.data = mbtd;
2382 mbtd->test_info.prepare_test_fn = prepare_bkops;
2383 mbtd->test_info.check_test_result_fn = check_bkops_result;
2384 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2385 mbtd->test_info.run_test_fn = run_bkops;
2386 mbtd->test_info.timeout_msec = BKOPS_TEST_TIMEOUT;
2387 mbtd->test_info.post_test_fn = bkops_post_test;
2388
2389 for (i = 0 ; i < number ; ++i) {
2390 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2391 test_pr_info("%s: ===================", __func__);
2392 for (j = BKOPS_MIN_TESTCASE ;
2393 j <= BKOPS_MAX_TESTCASE ; j++) {
2394 mbtd->test_info.testcase = j;
2395 ret = test_iosched_start_test(&mbtd->test_info);
2396 if (ret)
2397 break;
2398 }
2399 }
2400
2401 test_pr_info("%s: Completed all the test cases.", __func__);
2402
2403 return count;
2404}
2405
2406static ssize_t bkops_test_read(struct file *file,
2407 char __user *buffer,
2408 size_t count,
2409 loff_t *offset)
2410{
2411 memset((void *)buffer, 0, count);
2412
2413 snprintf(buffer, count,
2414 "\nbkops_test\n========================\n"
2415 "Description:\n"
2416 "This test simulates BKOPS status from card\n"
2417 "and verifies that:\n"
2418 " - Starting BKOPS delayed work, level 1\n"
2419 " - Starting BKOPS delayed work, level 1, with HPI\n"
2420 " - Cancel starting BKOPS delayed work, "
2421 " when a request is received\n"
2422 " - Starting BKOPS urgent, level 2,3\n"
2423 " - Starting BKOPS urgent with 2 requests\n");
2424 return strnlen(buffer, count);
2425}
2426
2427const struct file_operations bkops_test_ops = {
2428 .open = test_open,
2429 .write = bkops_test_write,
2430 .read = bkops_test_read,
2431};
2432
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002433static void mmc_block_test_debugfs_cleanup(void)
2434{
2435 debugfs_remove(mbtd->debug.random_test_seed);
2436 debugfs_remove(mbtd->debug.send_write_packing_test);
2437 debugfs_remove(mbtd->debug.err_check_test);
2438 debugfs_remove(mbtd->debug.send_invalid_packed_test);
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002439 debugfs_remove(mbtd->debug.packing_control_test);
Maya Erezddc55732012-10-17 09:51:01 +02002440 debugfs_remove(mbtd->debug.discard_sanitize_test);
Yaniv Gardie9214c82012-10-18 13:58:18 +02002441 debugfs_remove(mbtd->debug.bkops_test);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002442}
2443
2444static int mmc_block_test_debugfs_init(void)
2445{
2446 struct dentry *utils_root, *tests_root;
2447
2448 utils_root = test_iosched_get_debugfs_utils_root();
2449 tests_root = test_iosched_get_debugfs_tests_root();
2450
2451 if (!utils_root || !tests_root)
2452 return -EINVAL;
2453
2454 mbtd->debug.random_test_seed = debugfs_create_u32(
2455 "random_test_seed",
2456 S_IRUGO | S_IWUGO,
2457 utils_root,
2458 &mbtd->random_test_seed);
2459
2460 if (!mbtd->debug.random_test_seed)
2461 goto err_nomem;
2462
2463 mbtd->debug.send_write_packing_test =
2464 debugfs_create_file("send_write_packing_test",
2465 S_IRUGO | S_IWUGO,
2466 tests_root,
2467 NULL,
2468 &send_write_packing_test_ops);
2469
2470 if (!mbtd->debug.send_write_packing_test)
2471 goto err_nomem;
2472
2473 mbtd->debug.err_check_test =
2474 debugfs_create_file("err_check_test",
2475 S_IRUGO | S_IWUGO,
2476 tests_root,
2477 NULL,
2478 &err_check_test_ops);
2479
2480 if (!mbtd->debug.err_check_test)
2481 goto err_nomem;
2482
2483 mbtd->debug.send_invalid_packed_test =
2484 debugfs_create_file("send_invalid_packed_test",
2485 S_IRUGO | S_IWUGO,
2486 tests_root,
2487 NULL,
2488 &send_invalid_packed_test_ops);
2489
2490 if (!mbtd->debug.send_invalid_packed_test)
2491 goto err_nomem;
2492
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002493 mbtd->debug.packing_control_test = debugfs_create_file(
2494 "packing_control_test",
2495 S_IRUGO | S_IWUGO,
2496 tests_root,
2497 NULL,
2498 &write_packing_control_test_ops);
2499
2500 if (!mbtd->debug.packing_control_test)
2501 goto err_nomem;
2502
Maya Erezddc55732012-10-17 09:51:01 +02002503 mbtd->debug.discard_sanitize_test =
2504 debugfs_create_file("write_discard_sanitize_test",
2505 S_IRUGO | S_IWUGO,
2506 tests_root,
2507 NULL,
2508 &write_discard_sanitize_test_ops);
2509 if (!mbtd->debug.discard_sanitize_test) {
2510 mmc_block_test_debugfs_cleanup();
2511 return -ENOMEM;
2512 }
2513
Yaniv Gardie9214c82012-10-18 13:58:18 +02002514 mbtd->debug.bkops_test =
2515 debugfs_create_file("bkops_test",
2516 S_IRUGO | S_IWUGO,
2517 tests_root,
2518 NULL,
2519 &bkops_test_ops);
2520
2521 if (!mbtd->debug.bkops_test)
2522 goto err_nomem;
2523
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002524 return 0;
2525
2526err_nomem:
2527 mmc_block_test_debugfs_cleanup();
2528 return -ENOMEM;
2529}
2530
2531static void mmc_block_test_probe(void)
2532{
2533 struct request_queue *q = test_iosched_get_req_queue();
2534 struct mmc_queue *mq;
2535 int max_packed_reqs;
2536
2537 if (!q) {
2538 test_pr_err("%s: NULL request queue", __func__);
2539 return;
2540 }
2541
2542 mq = q->queuedata;
2543 if (!mq) {
2544 test_pr_err("%s: NULL mq", __func__);
2545 return;
2546 }
2547
2548 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
2549 mbtd->exp_packed_stats.packing_events =
2550 kzalloc((max_packed_reqs + 1) *
2551 sizeof(*mbtd->exp_packed_stats.packing_events),
2552 GFP_KERNEL);
2553
2554 mmc_block_test_debugfs_init();
2555}
2556
/* test-iosched exit hook: tear down this module's debugfs entries. */
static void mmc_block_test_remove(void)
{
	mmc_block_test_debugfs_cleanup();
}
2561
2562static int __init mmc_block_test_init(void)
2563{
2564 mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
2565 if (!mbtd) {
2566 test_pr_err("%s: failed to allocate mmc_block_test_data",
2567 __func__);
2568 return -ENODEV;
2569 }
2570
Yaniv Gardie9214c82012-10-18 13:58:18 +02002571 init_waitqueue_head(&mbtd->bkops_wait_q);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002572 mbtd->bdt.init_fn = mmc_block_test_probe;
2573 mbtd->bdt.exit_fn = mmc_block_test_remove;
2574 INIT_LIST_HEAD(&mbtd->bdt.list);
2575 test_iosched_register(&mbtd->bdt);
2576
2577 return 0;
2578}
2579
/* Module exit: unregister from test-iosched and free the global context. */
static void __exit mmc_block_test_exit(void)
{
	test_iosched_unregister(&mbtd->bdt);
	kfree(mbtd);
}
2585
/* Module entry/exit points and metadata */
module_init(mmc_block_test_init);
module_exit(mmc_block_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MMC block test");