/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/* MMC block test */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/delay.h>
#include <linux/test-iosched.h>
#include <linux/jiffies.h>
#include "queue.h"
#include <linux/mmc/mmc.h>

#define MODULE_NAME "mmc_block_test"
#define TEST_MAX_SECTOR_RANGE (600*1024*1024) /* 600 MB */
#define TEST_MAX_BIOS_PER_REQ 120
#define CMD23_PACKED_BIT (1 << 30)
#define LARGE_PRIME_1 1103515367
#define LARGE_PRIME_2 35757
#define PACKED_HDR_VER_MASK 0x000000FF
#define PACKED_HDR_RW_MASK 0x0000FF00
#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
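/*
 * As used by the invalid-packed-command tests below, word 0 of the packed
 * write header holds the header version in byte 0 (PACKED_HDR_VER_MASK),
 * the R/W code in byte 1 (PACKED_HDR_RW_MASK) and the number of entries in
 * byte 2 (PACKED_HDR_NUM_REQS_MASK); the following header words carry each
 * request's CMD23 and CMD25 arguments.
 */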
#define SECTOR_SIZE 512
#define NUM_OF_SECTORS_PER_BIO ((BIO_U32_SIZE * 4) / SECTOR_SIZE)
#define BIO_TO_SECTOR(x) (x * NUM_OF_SECTORS_PER_BIO)
/* the desired long test size to be written or read */
#define LONG_TEST_MAX_NUM_BYTES (50*1024*1024) /* 50MB */
/* request queue limitation is 128 requests, and we leave 10 spare requests */
#define TEST_MAX_REQUESTS 118
#define LONG_TEST_MAX_NUM_REQS (LONG_TEST_MAX_NUM_BYTES / \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* this doesn't allow the test requests num to be greater than the maximum */
#define LONG_TEST_ACTUAL_NUM_REQS \
		((TEST_MAX_REQUESTS < LONG_TEST_MAX_NUM_REQS) ? \
		TEST_MAX_REQUESTS : LONG_TEST_MAX_NUM_REQS)
#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
/* actual number of bytes in test */
#define LONG_TEST_ACTUAL_BYTE_NUM (LONG_TEST_ACTUAL_NUM_REQS * \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* actual number of MiB in test multiplied by 10, for single digit precision */
#define LONG_TEST_ACTUAL_MB_NUM_X_10 ((LONG_TEST_ACTUAL_BYTE_NUM * 10) / \
		(1024 * 1024))
/* extract integer value */
#define LONG_TEST_SIZE_INTEGER (LONG_TEST_ACTUAL_MB_NUM_X_10 / 10)
/* and calculate the MiB value fraction */
#define LONG_TEST_SIZE_FRACTION (LONG_TEST_ACTUAL_MB_NUM_X_10 - \
		(LONG_TEST_SIZE_INTEGER * 10))
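/*
 * Worked example (a sketch, assuming BIO_U32_SIZE, which is defined outside
 * this file, is 1024, i.e. a 4KB BIO): each long test request then carries
 * 120 * 4KB = 480KB, so LONG_TEST_MAX_NUM_REQS = 50MB / 480KB = 106
 * requests, LONG_TEST_ACTUAL_BYTE_NUM = 106 * 480KB, and the reported test
 * size is LONG_TEST_SIZE_INTEGER.LONG_TEST_SIZE_FRACTION = 49.6 MiB.
 */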

#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)

#define SANITIZE_TEST_TIMEOUT 240000
#define TEST_REQUEST_NUM_OF_BIOS 3


#define CHECK_BKOPS_STATS(stats, exp_bkops, exp_hpi, exp_suspend) \
	((stats.bkops != exp_bkops) || \
	 (stats.hpi != exp_hpi) || \
	 (stats.suspend != exp_suspend))
#define BKOPS_TEST_TIMEOUT 60000

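/*
 * Usage notes (illustrative, not taken from the original sources): a call
 * such as test_pr_info("%s: done", __func__) expands to
 * pr_info("%s: %s: done\n", MODULE_NAME, __func__), so every test log line
 * is prefixed with "mmc_block_test: ". CHECK_BKOPS_STATS() evaluates to
 * true (i.e. a mismatch) when any of the collected bkops/hpi/suspend
 * counters differs from the expected value passed in.
 */
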
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020077enum is_random {
78 NON_RANDOM_TEST,
79 RANDOM_TEST,
80};
81
82enum mmc_block_test_testcases {
83 /* Start of send write packing test group */
84 SEND_WRITE_PACKING_MIN_TESTCASE,
85 TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
86 TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
87 TEST_STOP_DUE_TO_FLUSH,
88 TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
89 TEST_STOP_DUE_TO_EMPTY_QUEUE,
90 TEST_STOP_DUE_TO_MAX_REQ_NUM,
91 TEST_STOP_DUE_TO_THRESHOLD,
92 SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,
93
94 /* Start of err check test group */
95 ERR_CHECK_MIN_TESTCASE,
96 TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
97 TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
98 TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
99 TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
100 TEST_RET_PARTIAL_MAX_FAIL_IDX,
101 TEST_RET_RETRY,
102 TEST_RET_CMD_ERR,
103 TEST_RET_DATA_ERR,
104 ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,
105
106 /* Start of send invalid test group */
107 INVALID_CMD_MIN_TESTCASE,
108 TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
109 TEST_HDR_WRONG_WRITE_CODE,
110 TEST_HDR_INVALID_RW_CODE,
111 TEST_HDR_DIFFERENT_ADDRESSES,
112 TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
113 TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
114 TEST_HDR_CMD23_PACKED_BIT_SET,
115 TEST_CMD23_MAX_PACKED_WRITES,
116 TEST_CMD23_ZERO_PACKED_WRITES,
117 TEST_CMD23_PACKED_BIT_UNSET,
118 TEST_CMD23_REL_WR_BIT_SET,
119 TEST_CMD23_BITS_16TO29_SET,
120 TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
121 INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200122
123 /*
124 * Start of packing control test group.
125 * in these next testcases the abbreviation FB = followed by
126 */
127 PACKING_CONTROL_MIN_TESTCASE,
128 TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
129 PACKING_CONTROL_MIN_TESTCASE,
130 TEST_PACKING_EXP_N_OVER_TRIGGER,
131 TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
132 TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
133 TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
134 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
135 TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
136 TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
137 TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
138 TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
139 TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
140 PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
Maya Erezddc55732012-10-17 09:51:01 +0200141
142 TEST_WRITE_DISCARD_SANITIZE_READ,
Yaniv Gardie9214c82012-10-18 13:58:18 +0200143
144 /* Start of bkops test group */
145 BKOPS_MIN_TESTCASE,
146 BKOPS_DELAYED_WORK_LEVEL_1 = BKOPS_MIN_TESTCASE,
147 BKOPS_DELAYED_WORK_LEVEL_1_HPI,
148 BKOPS_CANCEL_DELAYED_WORK,
149 BKOPS_URGENT_LEVEL_2,
150 BKOPS_URGENT_LEVEL_2_TWO_REQS,
151 BKOPS_URGENT_LEVEL_3,
152 BKOPS_MAX_TESTCASE = BKOPS_URGENT_LEVEL_3,
Lee Susmanf18263a2012-10-24 14:14:37 +0200153
154 TEST_LONG_SEQUENTIAL_READ,
Lee Susmana35ae6e2012-10-25 16:06:07 +0200155 TEST_LONG_SEQUENTIAL_WRITE,
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200156};
157
158enum mmc_block_test_group {
159 TEST_NO_GROUP,
160 TEST_GENERAL_GROUP,
161 TEST_SEND_WRITE_PACKING_GROUP,
162 TEST_ERR_CHECK_GROUP,
163 TEST_SEND_INVALID_GROUP,
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200164 TEST_PACKING_CONTROL_GROUP,
Yaniv Gardie9214c82012-10-18 13:58:18 +0200165 TEST_BKOPS_GROUP,
166};
167
168enum bkops_test_stages {
169 BKOPS_STAGE_1,
170 BKOPS_STAGE_2,
171 BKOPS_STAGE_3,
172 BKOPS_STAGE_4,
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200173};
174
175struct mmc_block_test_debug {
176 struct dentry *send_write_packing_test;
177 struct dentry *err_check_test;
178 struct dentry *send_invalid_packed_test;
179 struct dentry *random_test_seed;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200180 struct dentry *packing_control_test;
Maya Erezddc55732012-10-17 09:51:01 +0200181 struct dentry *discard_sanitize_test;
Yaniv Gardie9214c82012-10-18 13:58:18 +0200182 struct dentry *bkops_test;
Lee Susmanf18263a2012-10-24 14:14:37 +0200183 struct dentry *long_sequential_read_test;
Lee Susmana35ae6e2012-10-25 16:06:07 +0200184 struct dentry *long_sequential_write_test;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200185};
186
187struct mmc_block_test_data {
188 /* The number of write requests that the test will issue */
189 int num_requests;
190 /* The expected write packing statistics for the current test */
191 struct mmc_wr_pack_stats exp_packed_stats;
192 /*
193 * A user-defined seed for random choices of number of bios written in
	 * a request, and of number of requests issued in a test.
	 * This field is randomly updated after each use.
196 */
197 unsigned int random_test_seed;
198 /* A retry counter used in err_check tests */
199 int err_check_counter;
	/* Can be one of the values of enum mmc_block_test_group */
201 enum mmc_block_test_group test_group;
202 /*
203 * Indicates if the current testcase is running with random values of
204 * num_requests and num_bios (in each request)
205 */
206 int is_random;
	/* Data structure for debugfs dentries */
208 struct mmc_block_test_debug debug;
209 /*
210 * Data structure containing individual test information, including
211 * self-defined specific data
212 */
213 struct test_info test_info;
214 /* mmc block device test */
215 struct blk_dev_test_type bdt;
Yaniv Gardie9214c82012-10-18 13:58:18 +0200216 /* Current BKOPs test stage */
217 enum bkops_test_stages bkops_stage;
218 /* A wait queue for BKOPs tests */
219 wait_queue_head_t bkops_wait_q;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200220};
221
222static struct mmc_block_test_data *mbtd;
223
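/*
 * Dump the collected write packing statistics for the card. Illustrative
 * output (the values below are made up), assuming host "mmc0":
 *   mmc0: write packing statistics:
 *   mmc0: Packed 6 reqs - 2 times
 *   mmc0: stopped packing due to the following reasons:
 *   mmc0: 3 times: empty queue
 */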
Lee Susmane868f8a2012-11-04 15:04:41 +0200224void print_mmc_packing_stats(struct mmc_card *card)
225{
226 int i;
227 int max_num_of_packed_reqs = 0;
228
229 if ((!card) || (!card->wr_pack_stats.packing_events))
230 return;
231
232 max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
233
234 spin_lock(&card->wr_pack_stats.lock);
235
236 pr_info("%s: write packing statistics:\n",
237 mmc_hostname(card->host));
238
239 for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
240 if (card->wr_pack_stats.packing_events[i] != 0)
241 pr_info("%s: Packed %d reqs - %d times\n",
242 mmc_hostname(card->host), i,
243 card->wr_pack_stats.packing_events[i]);
244 }
245
246 pr_info("%s: stopped packing due to the following reasons:\n",
247 mmc_hostname(card->host));
248
	if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
		pr_info("%s: %d times: exceeding the max num of segments\n",
			mmc_hostname(card->host),
			card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
253 if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
254 pr_info("%s: %d times: exceeding the max num of sectors\n",
255 mmc_hostname(card->host),
256 card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
257 if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
258 pr_info("%s: %d times: wrong data direction\n",
259 mmc_hostname(card->host),
260 card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
261 if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
262 pr_info("%s: %d times: flush or discard\n",
263 mmc_hostname(card->host),
264 card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
265 if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
266 pr_info("%s: %d times: empty queue\n",
267 mmc_hostname(card->host),
268 card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
269 if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
270 pr_info("%s: %d times: rel write\n",
271 mmc_hostname(card->host),
272 card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
273 if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
274 pr_info("%s: %d times: Threshold\n",
275 mmc_hostname(card->host),
276 card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
277
278 spin_unlock(&card->wr_pack_stats.lock);
279}
280
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200281/*
282 * A callback assigned to the packed_test_fn field.
283 * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
284 * Here we alter the packed header or CMD23 in order to send an invalid
285 * packed command to the card.
286 */
287static void test_invalid_packed_cmd(struct request_queue *q,
288 struct mmc_queue_req *mqrq)
289{
290 struct mmc_queue *mq = q->queuedata;
291 u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
292 struct request *req = mqrq->req;
293 struct request *second_rq;
294 struct test_request *test_rq;
295 struct mmc_blk_request *brq = &mqrq->brq;
296 int num_requests;
297 int max_packed_reqs;
298
299 if (!mq) {
300 test_pr_err("%s: NULL mq", __func__);
301 return;
302 }
303
304 test_rq = (struct test_request *)req->elv.priv[0];
305 if (!test_rq) {
306 test_pr_err("%s: NULL test_rq", __func__);
307 return;
308 }
309 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
310
311 switch (mbtd->test_info.testcase) {
312 case TEST_HDR_INVALID_VERSION:
313 test_pr_info("%s: set invalid header version", __func__);
314 /* Put 0 in header version field (1 byte, offset 0 in header) */
315 packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
316 break;
317 case TEST_HDR_WRONG_WRITE_CODE:
318 test_pr_info("%s: wrong write code", __func__);
319 /* Set R/W field with R value (1 byte, offset 1 in header) */
320 packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
321 packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
322 break;
323 case TEST_HDR_INVALID_RW_CODE:
324 test_pr_info("%s: invalid r/w code", __func__);
325 /* Set R/W field with invalid value */
326 packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
327 packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
328 break;
329 case TEST_HDR_DIFFERENT_ADDRESSES:
330 test_pr_info("%s: different addresses", __func__);
331 second_rq = list_entry(req->queuelist.next, struct request,
332 queuelist);
333 test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
334 __func__, (long)req->__sector,
335 (long)second_rq->__sector);
336 /*
337 * Put start sector of second write request in the first write
338 * request's cmd25 argument in the packed header
339 */
340 packed_cmd_hdr[3] = second_rq->__sector;
341 break;
342 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
343 test_pr_info("%s: request num smaller than actual" , __func__);
344 num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
345 >> 16;
346 /* num of entries is decremented by 1 */
347 num_requests = (num_requests - 1) << 16;
348 /*
349 * Set number of requests field in packed write header to be
350 * smaller than the actual number (1 byte, offset 2 in header)
351 */
352 packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
353 ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
354 break;
355 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
356 test_pr_info("%s: request num larger than actual" , __func__);
357 num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
358 >> 16;
359 /* num of entries is incremented by 1 */
360 num_requests = (num_requests + 1) << 16;
361 /*
362 * Set number of requests field in packed write header to be
363 * larger than the actual number (1 byte, offset 2 in header).
364 */
365 packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
366 ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
367 break;
368 case TEST_HDR_CMD23_PACKED_BIT_SET:
369 test_pr_info("%s: header CMD23 packed bit set" , __func__);
370 /*
371 * Set packed bit (bit 30) in cmd23 argument of first and second
372 * write requests in packed write header.
		 * These are located at words 2 and 4 of the packed write header
374 */
375 packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
376 packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
377 break;
378 case TEST_CMD23_MAX_PACKED_WRITES:
379 test_pr_info("%s: CMD23 request num > max_packed_reqs",
380 __func__);
381 /*
382 * Set the individual packed cmd23 request num to
383 * max_packed_reqs + 1
384 */
385 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
386 break;
387 case TEST_CMD23_ZERO_PACKED_WRITES:
388 test_pr_info("%s: CMD23 request num = 0", __func__);
389 /* Set the individual packed cmd23 request num to zero */
390 brq->sbc.arg = MMC_CMD23_ARG_PACKED;
391 break;
392 case TEST_CMD23_PACKED_BIT_UNSET:
393 test_pr_info("%s: CMD23 packed bit unset", __func__);
394 /*
395 * Set the individual packed cmd23 packed bit to 0,
396 * although there is a packed write request
397 */
398 brq->sbc.arg &= ~CMD23_PACKED_BIT;
399 break;
400 case TEST_CMD23_REL_WR_BIT_SET:
401 test_pr_info("%s: CMD23 REL WR bit set", __func__);
402 /* Set the individual packed cmd23 reliable write bit */
403 brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
404 break;
405 case TEST_CMD23_BITS_16TO29_SET:
406 test_pr_info("%s: CMD23 bits [16-29] set", __func__);
407 brq->sbc.arg = MMC_CMD23_ARG_PACKED |
408 PACKED_HDR_BITS_16_TO_29_SET;
409 break;
410 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
411 test_pr_info("%s: CMD23 hdr not in block count", __func__);
412 brq->sbc.arg = MMC_CMD23_ARG_PACKED |
413 ((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
414 break;
415 default:
416 test_pr_err("%s: unexpected testcase %d",
417 __func__, mbtd->test_info.testcase);
418 break;
419 }
420}
421
422/*
423 * A callback assigned to the err_check_fn field of the mmc_request by the
424 * MMC/card/block layer.
425 * Called upon request completion by the MMC/core layer.
426 * Here we emulate an error return value from the card.
427 */
428static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
429{
430 struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
431 mmc_active);
432 struct request_queue *req_q = test_iosched_get_req_queue();
433 struct mmc_queue *mq;
434 int max_packed_reqs;
435 int ret = 0;
Yaniv Gardie9214c82012-10-18 13:58:18 +0200436 struct mmc_blk_request *brq;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200437
438 if (req_q)
439 mq = req_q->queuedata;
440 else {
441 test_pr_err("%s: NULL request_queue", __func__);
442 return 0;
443 }
444
445 if (!mq) {
446 test_pr_err("%s: %s: NULL mq", __func__,
447 mmc_hostname(card->host));
448 return 0;
449 }
450
451 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
452
453 if (!mq_rq) {
454 test_pr_err("%s: %s: NULL mq_rq", __func__,
455 mmc_hostname(card->host));
456 return 0;
457 }
Yaniv Gardie9214c82012-10-18 13:58:18 +0200458 brq = &mq_rq->brq;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200459
460 switch (mbtd->test_info.testcase) {
461 case TEST_RET_ABORT:
462 test_pr_info("%s: return abort", __func__);
463 ret = MMC_BLK_ABORT;
464 break;
465 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
466 test_pr_info("%s: return partial followed by success",
467 __func__);
468 /*
469 * Since in this testcase num_requests is always >= 2,
470 * we can be sure that packed_fail_idx is always >= 1
471 */
472 mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
473 test_pr_info("%s: packed_fail_idx = %d"
474 , __func__, mq_rq->packed_fail_idx);
475 mq->err_check_fn = NULL;
476 ret = MMC_BLK_PARTIAL;
477 break;
478 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
479 if (!mbtd->err_check_counter) {
480 test_pr_info("%s: return partial followed by abort",
481 __func__);
482 mbtd->err_check_counter++;
483 /*
484 * Since in this testcase num_requests is always >= 3,
485 * we have that packed_fail_idx is always >= 1
486 */
487 mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
488 test_pr_info("%s: packed_fail_idx = %d"
489 , __func__, mq_rq->packed_fail_idx);
490 ret = MMC_BLK_PARTIAL;
491 break;
492 }
493 mbtd->err_check_counter = 0;
494 mq->err_check_fn = NULL;
495 ret = MMC_BLK_ABORT;
496 break;
497 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
498 test_pr_info("%s: return partial multiple until success",
499 __func__);
500 if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
501 mq->err_check_fn = NULL;
502 mbtd->err_check_counter = 0;
503 ret = MMC_BLK_PARTIAL;
504 break;
505 }
506 mq_rq->packed_fail_idx = 1;
507 ret = MMC_BLK_PARTIAL;
508 break;
509 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
510 test_pr_info("%s: return partial max fail_idx", __func__);
511 mq_rq->packed_fail_idx = max_packed_reqs - 1;
512 mq->err_check_fn = NULL;
513 ret = MMC_BLK_PARTIAL;
514 break;
515 case TEST_RET_RETRY:
516 test_pr_info("%s: return retry", __func__);
517 ret = MMC_BLK_RETRY;
518 break;
519 case TEST_RET_CMD_ERR:
520 test_pr_info("%s: return cmd err", __func__);
521 ret = MMC_BLK_CMD_ERR;
522 break;
523 case TEST_RET_DATA_ERR:
524 test_pr_info("%s: return data err", __func__);
525 ret = MMC_BLK_DATA_ERR;
526 break;
Yaniv Gardie9214c82012-10-18 13:58:18 +0200527 case BKOPS_URGENT_LEVEL_2:
528 case BKOPS_URGENT_LEVEL_3:
529 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
530 if (mbtd->err_check_counter++ == 0) {
531 test_pr_info("%s: simulate an exception from the card",
532 __func__);
533 brq->cmd.resp[0] |= R1_EXCEPTION_EVENT;
534 }
535 mq->err_check_fn = NULL;
536 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200537 default:
538 test_pr_err("%s: unexpected testcase %d",
539 __func__, mbtd->test_info.testcase);
540 }
541
542 return ret;
543}
544
545/*
546 * This is a specific implementation for the get_test_case_str_fn function
547 * pointer in the test_info data structure. Given a valid test_data instance,
 * the function returns a string describing the test, based on the testcase.
549 */
550static char *get_test_case_str(struct test_data *td)
551{
552 if (!td) {
553 test_pr_err("%s: NULL td", __func__);
554 return NULL;
555 }
556
	switch (td->test_info.testcase) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200558 case TEST_STOP_DUE_TO_FLUSH:
Lee Susman039ce092012-11-15 13:36:15 +0200559 return "\"stop due to flush\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200560 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200561 return "\"stop due to flush after max-1 reqs\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200562 case TEST_STOP_DUE_TO_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200563 return "\"stop due to read\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200564 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200565 return "\"stop due to read after max-1 reqs\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200566 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
Lee Susman039ce092012-11-15 13:36:15 +0200567 return "\"stop due to empty queue\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200568 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
Lee Susman039ce092012-11-15 13:36:15 +0200569 return "\"stop due to max req num\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200570 case TEST_STOP_DUE_TO_THRESHOLD:
Lee Susman039ce092012-11-15 13:36:15 +0200571 return "\"stop due to exceeding threshold\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200572 case TEST_RET_ABORT:
Lee Susman039ce092012-11-15 13:36:15 +0200573 return "\"err_check return abort\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200574 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
Lee Susman039ce092012-11-15 13:36:15 +0200575 return "\"err_check return partial followed by success\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200576 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
Lee Susman039ce092012-11-15 13:36:15 +0200577 return "\"err_check return partial followed by abort\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200578 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
Lee Susman039ce092012-11-15 13:36:15 +0200579 return "\"err_check return partial multiple until success\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200580 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
Lee Susman039ce092012-11-15 13:36:15 +0200581 return "\"err_check return partial max fail index\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200582 case TEST_RET_RETRY:
Lee Susman039ce092012-11-15 13:36:15 +0200583 return "\"err_check return retry\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200584 case TEST_RET_CMD_ERR:
Lee Susman039ce092012-11-15 13:36:15 +0200585 return "\"err_check return cmd error\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200586 case TEST_RET_DATA_ERR:
Lee Susman039ce092012-11-15 13:36:15 +0200587 return "\"err_check return data error\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200588 case TEST_HDR_INVALID_VERSION:
Lee Susman039ce092012-11-15 13:36:15 +0200589 return "\"invalid - wrong header version\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200590 case TEST_HDR_WRONG_WRITE_CODE:
Lee Susman039ce092012-11-15 13:36:15 +0200591 return "\"invalid - wrong write code\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200592 case TEST_HDR_INVALID_RW_CODE:
Lee Susman039ce092012-11-15 13:36:15 +0200593 return "\"invalid - wrong R/W code\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200594 case TEST_HDR_DIFFERENT_ADDRESSES:
Lee Susman039ce092012-11-15 13:36:15 +0200595 return "\"invalid - header different addresses\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200596 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
Lee Susman039ce092012-11-15 13:36:15 +0200597 return "\"invalid - header req num smaller than actual\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200598 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
Lee Susman039ce092012-11-15 13:36:15 +0200599 return "\"invalid - header req num larger than actual\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200600 case TEST_HDR_CMD23_PACKED_BIT_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200601 return "\"invalid - header cmd23 packed bit set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200602 case TEST_CMD23_MAX_PACKED_WRITES:
Lee Susman039ce092012-11-15 13:36:15 +0200603 return "\"invalid - cmd23 max packed writes\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200604 case TEST_CMD23_ZERO_PACKED_WRITES:
Lee Susman039ce092012-11-15 13:36:15 +0200605 return "\"invalid - cmd23 zero packed writes\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200606 case TEST_CMD23_PACKED_BIT_UNSET:
Lee Susman039ce092012-11-15 13:36:15 +0200607 return "\"invalid - cmd23 packed bit unset\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200608 case TEST_CMD23_REL_WR_BIT_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200609 return "\"invalid - cmd23 rel wr bit set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200610 case TEST_CMD23_BITS_16TO29_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200611 return "\"invalid - cmd23 bits [16-29] set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200612 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
Lee Susman039ce092012-11-15 13:36:15 +0200613 return "\"invalid - cmd23 header block not in count\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200614 case TEST_PACKING_EXP_N_OVER_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200615 return "\"packing control - pack n\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200616 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200617 return "\"packing control - pack n followed by read\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200618 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
Lee Susman039ce092012-11-15 13:36:15 +0200619 return "\"packing control - pack n followed by flush\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200620 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200621 return "\"packing control - pack one followed by read\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200622 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200623 return "\"packing control - pack threshold\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200624 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
Lee Susman039ce092012-11-15 13:36:15 +0200625 return "\"packing control - no packing\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200626 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
Lee Susman039ce092012-11-15 13:36:15 +0200627 return "\"packing control - no packing, trigger requests\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200628 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200629 return "\"packing control - no pack, trigger-read-trigger\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200630 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200631 return "\"packing control- no pack, trigger-flush-trigger\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200632 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
Lee Susman039ce092012-11-15 13:36:15 +0200633 return "\"packing control - mix: pack -> no pack -> pack\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200634 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
Lee Susman039ce092012-11-15 13:36:15 +0200635 return "\"packing control - mix: no pack->pack->no pack\"";
Maya Erezddc55732012-10-17 09:51:01 +0200636 case TEST_WRITE_DISCARD_SANITIZE_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200637 return "\"write, discard, sanitize\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200638 case BKOPS_DELAYED_WORK_LEVEL_1:
Lee Susman039ce092012-11-15 13:36:15 +0200639 return "\"delayed work BKOPS level 1\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200640 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
Lee Susman039ce092012-11-15 13:36:15 +0200641 return "\"delayed work BKOPS level 1 with HPI\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200642 case BKOPS_CANCEL_DELAYED_WORK:
Lee Susman039ce092012-11-15 13:36:15 +0200643 return "\"cancel delayed BKOPS work\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200644 case BKOPS_URGENT_LEVEL_2:
Lee Susman039ce092012-11-15 13:36:15 +0200645 return "\"urgent BKOPS level 2\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200646 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200647 return "\"urgent BKOPS level 2, followed by a request\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200648 case BKOPS_URGENT_LEVEL_3:
Lee Susman039ce092012-11-15 13:36:15 +0200649 return "\"urgent BKOPS level 3\"";
Lee Susmanf18263a2012-10-24 14:14:37 +0200650 case TEST_LONG_SEQUENTIAL_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200651 return "\"long sequential read\"";
Lee Susmana35ae6e2012-10-25 16:06:07 +0200652 case TEST_LONG_SEQUENTIAL_WRITE:
Lee Susman039ce092012-11-15 13:36:15 +0200653 return "\"long sequential write\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200654 default:
Lee Susman039ce092012-11-15 13:36:15 +0200655 return " Unknown testcase";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200656 }
657
658 return NULL;
659}
660
661/*
662 * Compare individual testcase's statistics to the expected statistics:
663 * Compare stop reason and number of packing events
664 */
665static int check_wr_packing_statistics(struct test_data *td)
666{
667 struct mmc_wr_pack_stats *mmc_packed_stats;
668 struct mmc_queue *mq = td->req_q->queuedata;
	int max_packed_reqs;
	int i;
	struct mmc_card *card;
	struct mmc_wr_pack_stats expected_stats;
	int *stop_reason;
	int ret = 0;

	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	/* Dereference mq only after it was verified to be non-NULL */
	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
	card = mq->card;
	expected_stats = mbtd->exp_packed_stats;
682
683 mmc_packed_stats = mmc_blk_get_packed_statistics(card);
684 if (!mmc_packed_stats) {
685 test_pr_err("%s: NULL mmc_packed_stats", __func__);
686 return -EINVAL;
687 }
688
689 if (!mmc_packed_stats->packing_events) {
690 test_pr_err("%s: NULL packing_events", __func__);
691 return -EINVAL;
692 }
693
694 spin_lock(&mmc_packed_stats->lock);
695
696 if (!mmc_packed_stats->enabled) {
697 test_pr_err("%s write packing statistics are not enabled",
698 __func__);
699 ret = -EINVAL;
700 goto exit_err;
701 }
702
703 stop_reason = mmc_packed_stats->pack_stop_reason;
704
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200705 for (i = 1; i <= max_packed_reqs; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200706 if (mmc_packed_stats->packing_events[i] !=
707 expected_stats.packing_events[i]) {
708 test_pr_err(
709 "%s: Wrong pack stats in index %d, got %d, expected %d",
710 __func__, i, mmc_packed_stats->packing_events[i],
711 expected_stats.packing_events[i]);
712 if (td->fs_wr_reqs_during_test)
713 goto cancel_round;
714 ret = -EINVAL;
715 goto exit_err;
716 }
717 }
718
719 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
720 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
721 test_pr_err(
722 "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
723 __func__, stop_reason[EXCEEDS_SEGMENTS],
724 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
725 if (td->fs_wr_reqs_during_test)
726 goto cancel_round;
727 ret = -EINVAL;
728 goto exit_err;
729 }
730
731 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
732 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
733 test_pr_err(
734 "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
735 __func__, stop_reason[EXCEEDS_SECTORS],
736 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
737 if (td->fs_wr_reqs_during_test)
738 goto cancel_round;
739 ret = -EINVAL;
740 goto exit_err;
741 }
742
743 if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
744 expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
745 test_pr_err(
746 "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
747 __func__, stop_reason[WRONG_DATA_DIR],
748 expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
749 if (td->fs_wr_reqs_during_test)
750 goto cancel_round;
751 ret = -EINVAL;
752 goto exit_err;
753 }
754
755 if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
756 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
757 test_pr_err(
758 "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
759 __func__, stop_reason[FLUSH_OR_DISCARD],
760 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
761 if (td->fs_wr_reqs_during_test)
762 goto cancel_round;
763 ret = -EINVAL;
764 goto exit_err;
765 }
766
767 if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
768 expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
769 test_pr_err(
770 "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
771 __func__, stop_reason[EMPTY_QUEUE],
772 expected_stats.pack_stop_reason[EMPTY_QUEUE]);
773 if (td->fs_wr_reqs_during_test)
774 goto cancel_round;
775 ret = -EINVAL;
776 goto exit_err;
777 }
778
779 if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
780 expected_stats.pack_stop_reason[REL_WRITE]) {
781 test_pr_err(
782 "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
783 __func__, stop_reason[REL_WRITE],
784 expected_stats.pack_stop_reason[REL_WRITE]);
785 if (td->fs_wr_reqs_during_test)
786 goto cancel_round;
787 ret = -EINVAL;
788 goto exit_err;
789 }
790
791exit_err:
792 spin_unlock(&mmc_packed_stats->lock);
793 if (ret && mmc_packed_stats->enabled)
794 print_mmc_packing_stats(card);
795 return ret;
796cancel_round:
797 spin_unlock(&mmc_packed_stats->lock);
798 test_iosched_set_ignore_round(true);
799 return 0;
800}
801
802/*
 * Pseudo-randomly choose a new seed based on the last seed, and update it in
 * seed_number. Then return seed_number (mod max_val), or min_val if that
 * result is smaller.
805 */
806static unsigned int pseudo_random_seed(unsigned int *seed_number,
807 unsigned int min_val,
808 unsigned int max_val)
809{
810 int ret = 0;
811
812 if (!seed_number)
813 return 0;
814
815 *seed_number = ((unsigned int)(((unsigned long)*seed_number *
816 (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
817 ret = (unsigned int)((*seed_number) % max_val);
818
819 return (ret > min_val ? ret : min_val);
820}
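
/*
 * Note (derived from the logic above): the returned value always lies in
 * the range [min_val, max_val - 1], so a caller such as
 * pseudo_rnd_num_of_bios(), which passes TEST_MAX_BIOS_PER_REQ as max_val,
 * never gets TEST_MAX_BIOS_PER_REQ itself.
 */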
821
822/*
823 * Given a pseudo-random seed, find a pseudo-random num_of_bios.
 * Make sure that the total size in bytes implied by num_of_bios does not
 * exceed TEST_MAX_SECTOR_RANGE.
825 */
826static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
827 unsigned int *num_of_bios)
828{
829 do {
830 *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
831 TEST_MAX_BIOS_PER_REQ);
832 if (!(*num_of_bios))
833 *num_of_bios = 1;
834 } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
835}
836
837/* Add a single read request to the given td's request queue */
838static int prepare_request_add_read(struct test_data *td)
839{
840 int ret;
841 int start_sec;
842
843 if (td)
844 start_sec = td->start_sector;
845 else {
846 test_pr_err("%s: NULL td", __func__);
847 return 0;
848 }
849
850 test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
851 td->wr_rd_next_req_id);
852
853 ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
854 TEST_PATTERN_5A, NULL);
855 if (ret) {
856 test_pr_err("%s: failed to add a read request", __func__);
857 return ret;
858 }
859
860 return 0;
861}
862
863/* Add a single flush request to the given td's request queue */
864static int prepare_request_add_flush(struct test_data *td)
865{
866 int ret;
867
868 if (!td) {
869 test_pr_err("%s: NULL td", __func__);
870 return 0;
871 }
872
873 test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
874 td->unique_next_req_id);
875 ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
876 0, 0, NULL);
877 if (ret) {
878 test_pr_err("%s: failed to add a flush request", __func__);
879 return ret;
880 }
881
882 return ret;
883}
884
885/*
 * Add num_requests write requests to the given td's request queue.
 * If random test mode is chosen we pseudo-randomly choose the number of bios
 * for each write request; otherwise add between 1 and 5 bios per request.
889 */
890static int prepare_request_add_write_reqs(struct test_data *td,
891 int num_requests, int is_err_expected,
892 int is_random)
893{
894 int i;
895 unsigned int start_sec;
896 int num_bios;
897 int ret = 0;
898 unsigned int *bio_seed = &mbtd->random_test_seed;
899
900 if (td)
901 start_sec = td->start_sector;
902 else {
903 test_pr_err("%s: NULL td", __func__);
904 return ret;
905 }
906
907 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
908 num_requests, td->wr_rd_next_req_id);
909
Lee Susmanf18263a2012-10-24 14:14:37 +0200910 for (i = 1 ; i <= num_requests ; i++) {
911 start_sec =
912 td->start_sector + sizeof(int) *
913 BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200914 if (is_random)
915 pseudo_rnd_num_of_bios(bio_seed, &num_bios);
916 else
917 /*
918 * For the non-random case, give num_bios a value
919 * between 1 and 5, to keep a small number of BIOs
920 */
921 num_bios = (i%5)+1;
922
923 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
924 start_sec, num_bios, TEST_PATTERN_5A, NULL);
925
926 if (ret) {
927 test_pr_err("%s: failed to add a write request",
928 __func__);
929 return ret;
930 }
931 }
932 return 0;
933}
934
935/*
936 * Prepare the write, read and flush requests for a generic packed commands
937 * testcase
938 */
939static int prepare_packed_requests(struct test_data *td, int is_err_expected,
940 int num_requests, int is_random)
941{
942 int ret = 0;
943 struct mmc_queue *mq;
944 int max_packed_reqs;
945 struct request_queue *req_q;
946
947 if (!td) {
948 pr_err("%s: NULL td", __func__);
949 return -EINVAL;
950 }
951
952 req_q = td->req_q;
953
954 if (!req_q) {
955 pr_err("%s: NULL request queue", __func__);
956 return -EINVAL;
957 }
958
959 mq = req_q->queuedata;
960 if (!mq) {
961 test_pr_err("%s: NULL mq", __func__);
962 return -EINVAL;
963 }
964
965 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
966
	if (mbtd->random_test_seed == 0) {
968 mbtd->random_test_seed =
969 (unsigned int)(get_jiffies_64() & 0xFFFF);
970 test_pr_info("%s: got seed from jiffies %d",
971 __func__, mbtd->random_test_seed);
972 }
973
974 mmc_blk_init_packed_statistics(mq->card);
975
976 ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
977 is_random);
978 if (ret)
979 return ret;
980
981 /* Avoid memory corruption in upcoming stats set */
982 if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
983 num_requests--;
984
985 memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
986 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
987 memset(mbtd->exp_packed_stats.packing_events, 0,
988 (max_packed_reqs + 1) * sizeof(u32));
989 if (num_requests <= max_packed_reqs)
990 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
991
992 switch (td->test_info.testcase) {
993 case TEST_STOP_DUE_TO_FLUSH:
994 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
995 ret = prepare_request_add_flush(td);
996 if (ret)
997 return ret;
998
999 mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
1000 break;
1001 case TEST_STOP_DUE_TO_READ:
1002 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1003 ret = prepare_request_add_read(td);
1004 if (ret)
1005 return ret;
1006
1007 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1008 break;
1009 case TEST_STOP_DUE_TO_THRESHOLD:
1010 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1011 mbtd->exp_packed_stats.packing_events[1] = 1;
1012 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
1013 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1014 break;
1015 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1016 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1017 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
1018 break;
1019 default:
1020 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1021 }
1022 mbtd->num_requests = num_requests;
1023
1024 return 0;
1025}
1026
1027/*
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001028 * Prepare the write, read and flush requests for the packing control
1029 * testcases
1030 */
1031static int prepare_packed_control_tests_requests(struct test_data *td,
1032 int is_err_expected, int num_requests, int is_random)
1033{
1034 int ret = 0;
1035 struct mmc_queue *mq;
1036 int max_packed_reqs;
1037 int temp_num_req = num_requests;
1038 struct request_queue *req_q;
1039 int test_packed_trigger;
1040 int num_packed_reqs;
1041
1042 if (!td) {
1043 test_pr_err("%s: NULL td\n", __func__);
1044 return -EINVAL;
1045 }
1046
1047 req_q = td->req_q;
1048
1049 if (!req_q) {
1050 test_pr_err("%s: NULL request queue\n", __func__);
1051 return -EINVAL;
1052 }
1053
1054 mq = req_q->queuedata;
1055 if (!mq) {
1056 test_pr_err("%s: NULL mq", __func__);
1057 return -EINVAL;
1058 }
1059
1060 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1061 test_packed_trigger = mq->num_wr_reqs_to_start_packing;
1062 num_packed_reqs = num_requests - test_packed_trigger;
1063
1064 if (mbtd->random_test_seed == 0) {
1065 mbtd->random_test_seed =
1066 (unsigned int)(get_jiffies_64() & 0xFFFF);
1067 test_pr_info("%s: got seed from jiffies %d",
1068 __func__, mbtd->random_test_seed);
1069 }
1070
1071 mmc_blk_init_packed_statistics(mq->card);
1072
1073 if (td->test_info.testcase ==
1074 TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
1075 temp_num_req = num_requests;
1076 num_requests = test_packed_trigger - 1;
1077 }
1078
1079 /* Verify that the packing is disabled before starting the test */
1080 mq->wr_packing_enabled = false;
1081 mq->num_of_potential_packed_wr_reqs = 0;
1082
1083 if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
1084 mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
1085 mq->wr_packing_enabled = true;
1086 num_requests = test_packed_trigger + 2;
1087 }
1088
1089 ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
1090 is_random);
1091 if (ret)
1092 goto exit;
1093
1094 if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
1095 num_requests = temp_num_req;
1096
1097 memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
1098 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1099 memset(mbtd->exp_packed_stats.packing_events, 0,
1100 (max_packed_reqs + 1) * sizeof(u32));
1101
1102 switch (td->test_info.testcase) {
1103 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
1104 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
1105 ret = prepare_request_add_read(td);
1106 if (ret)
1107 goto exit;
1108
1109 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1110 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1111 break;
1112 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
1113 ret = prepare_request_add_flush(td);
1114 if (ret)
1115 goto exit;
1116
1117 ret = prepare_request_add_write_reqs(td, num_packed_reqs,
1118 is_err_expected, is_random);
1119 if (ret)
1120 goto exit;
1121
1122 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1123 mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
1124 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
1125 break;
1126 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
1127 ret = prepare_request_add_read(td);
1128 if (ret)
1129 goto exit;
1130
1131 ret = prepare_request_add_write_reqs(td, test_packed_trigger,
1132 is_err_expected, is_random);
1133 if (ret)
1134 goto exit;
1135
1136 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1137 break;
1138 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
1139 ret = prepare_request_add_flush(td);
1140 if (ret)
1141 goto exit;
1142
1143 ret = prepare_request_add_write_reqs(td, test_packed_trigger,
1144 is_err_expected, is_random);
1145 if (ret)
1146 goto exit;
1147
1148 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1149 break;
1150 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
1151 ret = prepare_request_add_read(td);
1152 if (ret)
1153 goto exit;
1154
1155 ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
1156 is_err_expected, is_random);
1157 if (ret)
1158 goto exit;
1159
1160 ret = prepare_request_add_write_reqs(td, num_requests,
1161 is_err_expected, is_random);
1162 if (ret)
1163 goto exit;
1164
1165 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1166 mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
1167 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1168 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1169 break;
1170 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
1171 ret = prepare_request_add_read(td);
1172 if (ret)
1173 goto exit;
1174
1175 ret = prepare_request_add_write_reqs(td, num_requests,
1176 is_err_expected, is_random);
1177 if (ret)
1178 goto exit;
1179
1180 ret = prepare_request_add_read(td);
1181 if (ret)
1182 goto exit;
1183
1184 ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
1185 is_err_expected, is_random);
1186 if (ret)
1187 goto exit;
1188
1189 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1190 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1191 break;
1192 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
1193 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
1194 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1195 break;
1196 default:
1197 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1198 mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
1199 }
1200 mbtd->num_requests = num_requests;
1201
1202exit:
1203 return ret;
1204}
1205
1206/*
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001207 * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
1208 * In this testcase we have mixed error expectations from different
1209 * write requests, hence the special prepare function.
1210 */
1211static int prepare_partial_followed_by_abort(struct test_data *td,
1212 int num_requests)
1213{
1214 int i, start_address;
1215 int is_err_expected = 0;
1216 int ret = 0;
1217 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1218 int max_packed_reqs;
1219
1220 if (!mq) {
1221 test_pr_err("%s: NULL mq", __func__);
1222 return -EINVAL;
1223 }
1224
1225 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1226
1227 mmc_blk_init_packed_statistics(mq->card);
1228
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001229 for (i = 1; i <= num_requests; i++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001230 if (i > (num_requests / 2))
1231 is_err_expected = 1;
1232
Lee Susmanf18263a2012-10-24 14:14:37 +02001233 start_address = td->start_sector +
1234 sizeof(int) * BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001235 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001236 start_address, (i % 5) + 1, TEST_PATTERN_5A,
1237 NULL);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001238 if (ret) {
1239 test_pr_err("%s: failed to add a write request",
1240 __func__);
1241 return ret;
1242 }
1243 }
1244
1245 memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
1246 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1247 memset(mbtd->exp_packed_stats.packing_events, 0,
1248 (max_packed_reqs + 1) * sizeof(u32));
1249 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1250 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1251
1252 mbtd->num_requests = num_requests;
1253
1254 return ret;
1255}
1256
1257/*
1258 * Get number of write requests for current testcase. If random test mode was
 * chosen, pseudo-randomly choose the number of requests; otherwise set it to
 * two less than the maximum number of packed requests.
1261 */
1262static int get_num_requests(struct test_data *td)
1263{
1264 int *seed = &mbtd->random_test_seed;
1265 struct request_queue *req_q;
1266 struct mmc_queue *mq;
1267 int max_num_requests;
1268 int num_requests;
1269 int min_num_requests = 2;
1270 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001271 int max_for_double;
1272 int test_packed_trigger;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001273
1274 req_q = test_iosched_get_req_queue();
1275 if (req_q)
1276 mq = req_q->queuedata;
1277 else {
1278 test_pr_err("%s: NULL request queue", __func__);
1279 return 0;
1280 }
1281
1282 if (!mq) {
1283 test_pr_err("%s: NULL mq", __func__);
1284 return -EINVAL;
1285 }
1286
1287 max_num_requests = mq->card->ext_csd.max_packed_writes;
1288 num_requests = max_num_requests - 2;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001289 test_packed_trigger = mq->num_wr_reqs_to_start_packing;
1290
1291 /*
1292 * Here max_for_double is intended for packed control testcases
	 * in which we issue many write requests. Its purpose is to prevent
	 * exceeding the max number of req_queue requests.
1295 */
1296 max_for_double = max_num_requests - 10;
1297
1298 if (td->test_info.testcase ==
1299 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1300 /* Don't expect packing, so issue up to trigger-1 reqs */
1301 num_requests = test_packed_trigger - 1;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001302
1303 if (is_random) {
1304 if (td->test_info.testcase ==
1305 TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001306 /*
1307 * Here we don't want num_requests to be less than 1
1308 * as a consequence of division by 2.
1309 */
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001310 min_num_requests = 3;
1311
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001312 if (td->test_info.testcase ==
1313 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1314 /* Don't expect packing, so issue up to trigger reqs */
1315 max_num_requests = test_packed_trigger;
1316
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001317 num_requests = pseudo_random_seed(seed, min_num_requests,
1318 max_num_requests - 1);
1319 }
1320
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001321 if (td->test_info.testcase ==
1322 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1323 num_requests -= test_packed_trigger;
1324
1325 if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
1326 num_requests =
1327 num_requests > max_for_double ? max_for_double : num_requests;
1328
1329 if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
1330 num_requests += test_packed_trigger;
1331
1332 if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
1333 num_requests = test_packed_trigger;
1334
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001335 return num_requests;
1336}
1337
Lee Susmanf18263a2012-10-24 14:14:37 +02001338static int prepare_long_test_requests(struct test_data *td)
1339{
1340
1341 int ret;
1342 int start_sec;
1343 int j;
1344 int test_direction;
1345
1346 if (td)
1347 start_sec = td->start_sector;
1348 else {
1349 test_pr_err("%s: NULL td\n", __func__);
1350 return -EINVAL;
1351 }
1352
Lee Susmana35ae6e2012-10-25 16:06:07 +02001353 if (td->test_info.testcase == TEST_LONG_SEQUENTIAL_WRITE)
1354 test_direction = WRITE;
1355 else
1356 test_direction = READ;
Lee Susmanf18263a2012-10-24 14:14:37 +02001357
	test_pr_info("%s: Adding %d long test requests, first req_id=%d",
		__func__, LONG_TEST_ACTUAL_NUM_REQS, td->wr_rd_next_req_id);
1360
1361 for (j = 0; j < LONG_TEST_ACTUAL_NUM_REQS; j++) {
1362
1363 ret = test_iosched_add_wr_rd_test_req(0, test_direction,
1364 start_sec,
1365 TEST_MAX_BIOS_PER_REQ,
1366 TEST_NO_PATTERN, NULL);
1367 if (ret) {
1368 test_pr_err("%s: failed to add a bio request",
1369 __func__);
1370 return ret;
1371 }
1372
1373 start_sec +=
1374 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE);
1375 }
1376
1377 return 0;
1378}
1379
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001380/*
1381 * An implementation for the prepare_test_fn pointer in the test_info
1382 * data structure. According to the testcase we add the right number of requests
1383 * and decide if an error is expected or not.
1384 */
1385static int prepare_test(struct test_data *td)
1386{
	struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
	int max_num_requests;
	int num_requests = 0;
	int ret = 0;
	int is_random = mbtd->is_random;
	int test_packed_trigger;

	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	/* Dereference mq only after it was verified to be non-NULL */
	max_num_requests = mq->card->ext_csd.max_packed_writes;
	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
1400
1401 if (is_random && mbtd->random_test_seed == 0) {
1402 mbtd->random_test_seed =
1403 (unsigned int)(get_jiffies_64() & 0xFFFF);
1404 test_pr_info("%s: got seed from jiffies %d",
1405 __func__, mbtd->random_test_seed);
1406 }
1407
1408 num_requests = get_num_requests(td);
1409
1410 if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
1411 mq->packed_test_fn =
1412 test_invalid_packed_cmd;
1413
1414 if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
1415 mq->err_check_fn = test_err_check;
1416
1417 switch (td->test_info.testcase) {
1418 case TEST_STOP_DUE_TO_FLUSH:
1419 case TEST_STOP_DUE_TO_READ:
1420 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
1421 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
1422 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
1423 case TEST_CMD23_PACKED_BIT_UNSET:
1424 ret = prepare_packed_requests(td, 0, num_requests, is_random);
1425 break;
1426 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1427 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1428 ret = prepare_packed_requests(td, 0, max_num_requests - 1,
1429 is_random);
1430 break;
1431 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
1432 ret = prepare_partial_followed_by_abort(td, num_requests);
1433 break;
1434 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1435 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1436 ret = prepare_packed_requests(td, 0, max_num_requests,
1437 is_random);
1438 break;
1439 case TEST_STOP_DUE_TO_THRESHOLD:
1440 ret = prepare_packed_requests(td, 0, max_num_requests + 1,
1441 is_random);
1442 break;
1443 case TEST_RET_ABORT:
1444 case TEST_RET_RETRY:
1445 case TEST_RET_CMD_ERR:
1446 case TEST_RET_DATA_ERR:
1447 case TEST_HDR_INVALID_VERSION:
1448 case TEST_HDR_WRONG_WRITE_CODE:
1449 case TEST_HDR_INVALID_RW_CODE:
1450 case TEST_HDR_DIFFERENT_ADDRESSES:
1451 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
1452 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
1453 case TEST_CMD23_MAX_PACKED_WRITES:
1454 case TEST_CMD23_ZERO_PACKED_WRITES:
1455 case TEST_CMD23_REL_WR_BIT_SET:
1456 case TEST_CMD23_BITS_16TO29_SET:
1457 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
1458 case TEST_HDR_CMD23_PACKED_BIT_SET:
1459 ret = prepare_packed_requests(td, 1, num_requests, is_random);
1460 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001461 case TEST_PACKING_EXP_N_OVER_TRIGGER:
1462 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
1463 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
1464 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
1465 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
1466 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
1467 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1468 is_random);
1469 break;
1470 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
1471 ret = prepare_packed_control_tests_requests(td, 0,
1472 max_num_requests, is_random);
1473 break;
1474 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
1475 ret = prepare_packed_control_tests_requests(td, 0,
1476 test_packed_trigger + 1,
1477 is_random);
1478 break;
1479 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
1480 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1481 is_random);
1482 break;
1483 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
1484 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
1485 ret = prepare_packed_control_tests_requests(td, 0,
1486 test_packed_trigger, is_random);
1487 break;
Lee Susmana35ae6e2012-10-25 16:06:07 +02001488 case TEST_LONG_SEQUENTIAL_WRITE:
1489 ret = prepare_long_test_requests(td);
1490 break;
Lee Susmanf18263a2012-10-24 14:14:37 +02001491 case TEST_LONG_SEQUENTIAL_READ:
1492 ret = prepare_long_test_requests(td);
1493 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001494 default:
1495 test_pr_info("%s: Invalid test case...", __func__);
Lee Susmanf18263a2012-10-24 14:14:37 +02001496 ret = -EINVAL;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001497 }
1498
1499 return ret;
1500}
1501
1502/*
1503 * An implementation for the post_test_fn in the test_info data structure.
1504 * In our case we just reset the function pointers in the mmc_queue in order for
1505 * the FS to be able to dispatch its requests correctly after the test is
1506 * finished.
1507 */
1508static int post_test(struct test_data *td)
1509{
1510 struct mmc_queue *mq;
1511
1512 if (!td)
1513 return -EINVAL;
1514
1515 mq = td->req_q->queuedata;
1516
1517 if (!mq) {
1518 test_pr_err("%s: NULL mq", __func__);
1519 return -EINVAL;
1520 }
1521
1522 mq->packed_test_fn = NULL;
1523 mq->err_check_fn = NULL;
1524
1525 return 0;
1526}
1527
1528/*
1529 * This function checks, based on the current test's test_group, that the
1530 * packed commands capability and control are set right. In addition, we check
1531 * if the card supports the packed command feature.
1532 */
1533static int validate_packed_commands_settings(void)
1534{
1535 struct request_queue *req_q;
1536 struct mmc_queue *mq;
1537 int max_num_requests;
1538 struct mmc_host *host;
1539
1540 req_q = test_iosched_get_req_queue();
1541 if (!req_q) {
1542 test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
1543 test_iosched_set_test_result(TEST_FAILED);
1544 return -EINVAL;
1545 }
1546
1547 mq = req_q->queuedata;
1548 if (!mq) {
1549 test_pr_err("%s: NULL mq", __func__);
1550 return -EINVAL;
1551 }
1552
1553 max_num_requests = mq->card->ext_csd.max_packed_writes;
1554 host = mq->card->host;
1555
1556	if (!(host->caps2 & MMC_CAP2_PACKED_WR)) {
1557 test_pr_err("%s: Packed Write capability disabled, exit test",
1558 __func__);
1559 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1560 return -EINVAL;
1561 }
1562
1563 if (max_num_requests == 0) {
1564 test_pr_err(
1565 "%s: no write packing support, ext_csd.max_packed_writes=%d",
1566 __func__, mq->card->ext_csd.max_packed_writes);
1567 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1568 return -EINVAL;
1569 }
1570
1571 test_pr_info("%s: max number of packed requests supported is %d ",
1572 __func__, max_num_requests);
1573
1574 switch (mbtd->test_group) {
1575 case TEST_SEND_WRITE_PACKING_GROUP:
1576 case TEST_ERR_CHECK_GROUP:
1577 case TEST_SEND_INVALID_GROUP:
1578 /* disable the packing control */
1579 host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
1580 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001581 case TEST_PACKING_CONTROL_GROUP:
1582 host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
1583 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001584 default:
1585 break;
1586 }
1587
1588 return 0;
1589}
1590
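/*
 * Pick a pseudo-random start sector and number of bios for a request,
 * retrying until the whole request fits inside
 * [min_start_sector, min_start_sector + TEST_MAX_SECTOR_RANGE].
 */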
Maya Erezddc55732012-10-17 09:51:01 +02001591static void pseudo_rnd_sector_and_size(unsigned int *seed,
1592 unsigned int min_start_sector,
1593 unsigned int *start_sector,
1594 unsigned int *num_of_bios)
1595{
1596 unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
1597 do {
1598 *start_sector = pseudo_random_seed(seed,
1599 1, max_sec);
1600 *num_of_bios = pseudo_random_seed(seed,
1601 1, TEST_MAX_BIOS_PER_REQ);
1602 if (!(*num_of_bios))
1603 *num_of_bios = 1;
1604 } while ((*start_sector < min_start_sector) ||
1605 (*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
1606}
1607
1608/* sanitize test functions */
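/*
 * Queue (BLKDEV_MAX_RQ - 10) pseudo-random DISCARD requests, followed by a
 * single SANITIZE request.
 */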
1609static int prepare_write_discard_sanitize_read(struct test_data *td)
1610{
1611 unsigned int start_sector;
1612 unsigned int num_of_bios = 0;
1613 static unsigned int total_bios;
1614 unsigned int *num_bios_seed;
1615 int i = 0;
1616
1617 if (mbtd->random_test_seed == 0) {
1618 mbtd->random_test_seed =
1619 (unsigned int)(get_jiffies_64() & 0xFFFF);
1620 test_pr_info("%s: got seed from jiffies %d",
1621 __func__, mbtd->random_test_seed);
1622 }
1623 num_bios_seed = &mbtd->random_test_seed;
1624
1625 do {
1626 pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
1627 &start_sector, &num_of_bios);
1628
1629 /* DISCARD */
1630 total_bios += num_of_bios;
1631 test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
1632 __func__, td->unique_next_req_id, start_sector,
1633 num_of_bios);
1634 test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
1635 start_sector, BIO_TO_SECTOR(num_of_bios),
1636 NULL);
1637
1638 } while (++i < (BLKDEV_MAX_RQ-10));
1639
1640 test_pr_info("%s: total discard bios = %d", __func__, total_bios);
1641
1642 test_pr_info("%s: add sanitize req", __func__);
1643 test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);
1644
1645 return 0;
1646}
1647
Yaniv Gardie9214c82012-10-18 13:58:18 +02001648/*
1649 * Post test operations for BKOPs test
1650 * Disable the BKOPs statistics and clear the feature flags
1651 */
1652static int bkops_post_test(struct test_data *td)
1653{
1654 struct request_queue *q = td->req_q;
1655 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1656 struct mmc_card *card = mq->card;
1657
1658 mmc_card_clr_doing_bkops(mq->card);
1659 card->ext_csd.raw_bkops_status = 0;
1660
1661 spin_lock(&card->bkops_info.bkops_stats.lock);
1662 card->bkops_info.bkops_stats.enabled = false;
1663 spin_unlock(&card->bkops_info.bkops_stats.lock);
1664
1665 return 0;
1666}
1667
1668/*
1669 * Verify the BKOPs statistics
1670 */
1671static int check_bkops_result(struct test_data *td)
1672{
1673 struct request_queue *q = td->req_q;
1674 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1675 struct mmc_card *card = mq->card;
1676 struct mmc_bkops_stats *bkops_stat;
1677
1678 if (!card)
1679 goto fail;
1680
1681 bkops_stat = &card->bkops_info.bkops_stats;
1682
1683 test_pr_info("%s: Test results: bkops:(%d,%d,%d) hpi:%d, suspend:%d",
1684 __func__,
1685 bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX],
1686 bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX],
1687 bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX],
1688 bkops_stat->hpi,
1689 bkops_stat->suspend);
1690
1691 switch (mbtd->test_info.testcase) {
1692 case BKOPS_DELAYED_WORK_LEVEL_1:
1693 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
1694 (bkops_stat->suspend == 1) &&
1695 (bkops_stat->hpi == 0))
1696 goto exit;
1697 else
1698 goto fail;
1699 break;
1700 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
1701 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
1702 (bkops_stat->suspend == 0) &&
1703 (bkops_stat->hpi == 1))
1704 goto exit;
Yaniv Gardidced8e42012-11-25 16:00:40 +02001705 /* this might happen due to timing issues */
1706 else if
1707 ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
1708 (bkops_stat->suspend == 0) &&
1709 (bkops_stat->hpi == 0))
1710 goto ignore;
Yaniv Gardie9214c82012-10-18 13:58:18 +02001711 else
1712 goto fail;
1713 break;
1714 case BKOPS_CANCEL_DELAYED_WORK:
1715 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
1716 (bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 0) &&
1717 (bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 0) &&
1718 (bkops_stat->suspend == 0) &&
1719 (bkops_stat->hpi == 0))
1720 goto exit;
1721 else
1722 goto fail;
1723 case BKOPS_URGENT_LEVEL_2:
1724 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
1725 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 1) &&
1726 (bkops_stat->suspend == 0) &&
1727 (bkops_stat->hpi == 0))
1728 goto exit;
1729 else
1730 goto fail;
1731 case BKOPS_URGENT_LEVEL_3:
1732 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 1) &&
1733 (bkops_stat->suspend == 0) &&
1734 (bkops_stat->hpi == 0))
1735 goto exit;
1736 else
1737 goto fail;
1738 default:
1739 return -EINVAL;
1740 }
1741
1742exit:
1743 return 0;
Yaniv Gardidced8e42012-11-25 16:00:40 +02001744ignore:
1745 test_iosched_set_ignore_round(true);
1746 return 0;
Yaniv Gardie9214c82012-10-18 13:58:18 +02001747fail:
1748 if (td->fs_wr_reqs_during_test) {
1749 test_pr_info("%s: wr reqs during test, cancel the round",
1750 __func__);
1751 test_iosched_set_ignore_round(true);
1752 return 0;
1753 }
1754
1755 test_pr_info("%s: BKOPs statistics are not as expected, test failed",
1756 __func__);
1757 return -EINVAL;
1758}
1759
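/*
 * end_io callback for the last request of a BKOPS test case: record the
 * result, move the test to BKOPS_STAGE_4 and wake up the waiting test thread.
 */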
1760static void bkops_end_io_final_fn(struct request *rq, int err)
1761{
1762 struct test_request *test_rq =
1763 (struct test_request *)rq->elv.priv[0];
1764 BUG_ON(!test_rq);
1765
1766 test_rq->req_completed = 1;
1767 test_rq->req_result = err;
1768
1769 test_pr_info("%s: request %d completed, err=%d",
1770 __func__, test_rq->req_id, err);
1771
1772 mbtd->bkops_stage = BKOPS_STAGE_4;
1773 wake_up(&mbtd->bkops_wait_q);
1774}
1775
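/*
 * end_io callback for an intermediate request of a BKOPS test case: record
 * the result, move the test to BKOPS_STAGE_2 and wake up the waiting test
 * thread.
 */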
1776static void bkops_end_io_fn(struct request *rq, int err)
1777{
1778 struct test_request *test_rq =
1779 (struct test_request *)rq->elv.priv[0];
1780 BUG_ON(!test_rq);
1781
1782 test_rq->req_completed = 1;
1783 test_rq->req_result = err;
1784
1785 test_pr_info("%s: request %d completed, err=%d",
1786 __func__, test_rq->req_id, err);
1787 mbtd->bkops_stage = BKOPS_STAGE_2;
1788 wake_up(&mbtd->bkops_wait_q);
1789
1790}
1791
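/*
 * Prepare a BKOPS test round: verify that BKOPS is enabled and not already
 * running, reset the BKOPS statistics and, for the urgent-level test cases,
 * install the error-check hook.
 */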
1792static int prepare_bkops(struct test_data *td)
1793{
1794 int ret = 0;
1795 struct request_queue *q = td->req_q;
1796 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1797 struct mmc_card *card = mq->card;
1798 struct mmc_bkops_stats *bkops_stat;
1799
1800 if (!card)
1801 return -EINVAL;
1802
1803 bkops_stat = &card->bkops_info.bkops_stats;
1804
1805 if (!card->ext_csd.bkops_en) {
1806		test_pr_err("%s: BKOPS is not enabled by the card or host",
1807 __func__);
1808 return -ENOTSUPP;
1809 }
1810 if (mmc_card_doing_bkops(card)) {
1811 test_pr_err("%s: BKOPS in progress, try later", __func__);
1812 return -EAGAIN;
1813 }
1814
1815 mmc_blk_init_bkops_statistics(card);
1816
1817 if ((mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2) ||
1818 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2_TWO_REQS) ||
1819 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_3))
1820 mq->err_check_fn = test_err_check;
1821 mbtd->err_check_counter = 0;
1822
1823 return ret;
1824}
1825
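/*
 * Run a single BKOPS test case: fake the card's raw BKOPS status, dispatch
 * the test write requests and wait for the expected BKOPS stage before
 * marking the test as completed.
 */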
1826static int run_bkops(struct test_data *td)
1827{
1828 int ret = 0;
1829 struct request_queue *q = td->req_q;
1830 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1831 struct mmc_card *card = mq->card;
1832 struct mmc_bkops_stats *bkops_stat;
1833
1834 if (!card)
1835 return -EINVAL;
1836
1837 bkops_stat = &card->bkops_info.bkops_stats;
1838
1839 switch (mbtd->test_info.testcase) {
1840 case BKOPS_DELAYED_WORK_LEVEL_1:
1841 bkops_stat->ignore_card_bkops_status = true;
1842 card->ext_csd.raw_bkops_status = 1;
1843 card->bkops_info.sectors_changed =
1844 card->bkops_info.min_sectors_to_queue_delayed_work + 1;
1845 mbtd->bkops_stage = BKOPS_STAGE_1;
1846
1847 __blk_run_queue(q);
1848 /* this long sleep makes sure the host starts bkops and
1849		   also gets into suspend */
1850 msleep(10000);
1851
1852 bkops_stat->ignore_card_bkops_status = false;
1853 card->ext_csd.raw_bkops_status = 0;
1854
1855 test_iosched_mark_test_completion();
1856 break;
1857
1858 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
1859 bkops_stat->ignore_card_bkops_status = true;
1860 card->ext_csd.raw_bkops_status = 1;
1861 card->bkops_info.sectors_changed =
1862 card->bkops_info.min_sectors_to_queue_delayed_work + 1;
1863 mbtd->bkops_stage = BKOPS_STAGE_1;
1864
1865 __blk_run_queue(q);
1866 msleep(card->bkops_info.delay_ms);
1867
1868 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1869 td->start_sector,
1870 TEST_REQUEST_NUM_OF_BIOS,
1871 TEST_PATTERN_5A,
1872 bkops_end_io_final_fn);
1873 if (ret) {
1874 test_pr_err("%s: failed to add a write request",
1875 __func__);
1876 ret = -EINVAL;
1877 break;
1878 }
1879
1880 td->next_req = list_entry(td->test_queue.prev,
1881 struct test_request, queuelist);
1882 __blk_run_queue(q);
1883 wait_event(mbtd->bkops_wait_q,
1884 mbtd->bkops_stage == BKOPS_STAGE_4);
1885 bkops_stat->ignore_card_bkops_status = false;
1886
1887 test_iosched_mark_test_completion();
1888 break;
1889
1890 case BKOPS_CANCEL_DELAYED_WORK:
1891 bkops_stat->ignore_card_bkops_status = true;
1892 card->ext_csd.raw_bkops_status = 1;
1893 card->bkops_info.sectors_changed =
1894 card->bkops_info.min_sectors_to_queue_delayed_work + 1;
1895 mbtd->bkops_stage = BKOPS_STAGE_1;
1896
1897 __blk_run_queue(q);
1898
1899 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1900 td->start_sector,
1901 TEST_REQUEST_NUM_OF_BIOS,
1902 TEST_PATTERN_5A,
1903 bkops_end_io_final_fn);
1904 if (ret) {
1905 test_pr_err("%s: failed to add a write request",
1906 __func__);
1907 ret = -EINVAL;
1908 break;
1909 }
1910
1911 td->next_req = list_entry(td->test_queue.prev,
1912 struct test_request, queuelist);
1913 __blk_run_queue(q);
1914 wait_event(mbtd->bkops_wait_q,
1915 mbtd->bkops_stage == BKOPS_STAGE_4);
1916 bkops_stat->ignore_card_bkops_status = false;
1917
1918 test_iosched_mark_test_completion();
1919 break;
1920
1921 case BKOPS_URGENT_LEVEL_2:
1922 case BKOPS_URGENT_LEVEL_3:
1923 bkops_stat->ignore_card_bkops_status = true;
1924 if (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2)
1925 card->ext_csd.raw_bkops_status = 2;
1926 else
1927 card->ext_csd.raw_bkops_status = 3;
1928 mbtd->bkops_stage = BKOPS_STAGE_1;
1929
1930 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1931 td->start_sector,
1932 TEST_REQUEST_NUM_OF_BIOS,
1933 TEST_PATTERN_5A,
1934 bkops_end_io_fn);
1935 if (ret) {
1936 test_pr_err("%s: failed to add a write request",
1937 __func__);
1938 ret = -EINVAL;
1939 break;
1940 }
1941
1942 td->next_req = list_entry(td->test_queue.prev,
1943 struct test_request, queuelist);
1944 __blk_run_queue(q);
1945 wait_event(mbtd->bkops_wait_q,
1946 mbtd->bkops_stage == BKOPS_STAGE_2);
1947 card->ext_csd.raw_bkops_status = 0;
1948
1949 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1950 td->start_sector,
1951 TEST_REQUEST_NUM_OF_BIOS,
1952 TEST_PATTERN_5A,
1953 bkops_end_io_final_fn);
1954 if (ret) {
1955 test_pr_err("%s: failed to add a write request",
1956 __func__);
1957 ret = -EINVAL;
1958 break;
1959 }
1960
1961 td->next_req = list_entry(td->test_queue.prev,
1962 struct test_request, queuelist);
1963 __blk_run_queue(q);
1964
1965 wait_event(mbtd->bkops_wait_q,
1966 mbtd->bkops_stage == BKOPS_STAGE_4);
1967
1968 bkops_stat->ignore_card_bkops_status = false;
1969 test_iosched_mark_test_completion();
1970 break;
1971
1972 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
1973 mq->wr_packing_enabled = false;
1974 bkops_stat->ignore_card_bkops_status = true;
1975 card->ext_csd.raw_bkops_status = 2;
1976 mbtd->bkops_stage = BKOPS_STAGE_1;
1977
1978 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1979 td->start_sector,
1980 TEST_REQUEST_NUM_OF_BIOS,
1981 TEST_PATTERN_5A,
1982 NULL);
1983 if (ret) {
1984 test_pr_err("%s: failed to add a write request",
1985 __func__);
1986 ret = -EINVAL;
1987 break;
1988 }
1989
1990 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
1991 td->start_sector,
1992 TEST_REQUEST_NUM_OF_BIOS,
1993 TEST_PATTERN_5A,
1994 bkops_end_io_fn);
1995 if (ret) {
1996 test_pr_err("%s: failed to add a write request",
1997 __func__);
1998 ret = -EINVAL;
1999 break;
2000 }
2001
2002 td->next_req = list_entry(td->test_queue.next,
2003 struct test_request, queuelist);
2004 __blk_run_queue(q);
2005 wait_event(mbtd->bkops_wait_q,
2006 mbtd->bkops_stage == BKOPS_STAGE_2);
2007 card->ext_csd.raw_bkops_status = 0;
2008
2009 ret = test_iosched_add_wr_rd_test_req(0, WRITE,
2010 td->start_sector,
2011 TEST_REQUEST_NUM_OF_BIOS,
2012 TEST_PATTERN_5A,
2013 bkops_end_io_final_fn);
2014 if (ret) {
2015 test_pr_err("%s: failed to add a write request",
2016 __func__);
2017 ret = -EINVAL;
2018 break;
2019 }
2020
2021 td->next_req = list_entry(td->test_queue.prev,
2022 struct test_request, queuelist);
2023 __blk_run_queue(q);
2024
2025 wait_event(mbtd->bkops_wait_q,
2026 mbtd->bkops_stage == BKOPS_STAGE_4);
2027
2028 bkops_stat->ignore_card_bkops_status = false;
2029 test_iosched_mark_test_completion();
2030
2031 break;
2032 default:
2033 test_pr_err("%s: wrong testcase: %d", __func__,
2034 mbtd->test_info.testcase);
2035 ret = -EINVAL;
2036 }
2037 return ret;
2038}
2039
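/*
 * Debugfs glue shared by all the tests below: test_open() re-arms the
 * one-shot help text that the read handlers return.
 */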
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002040static bool message_repeat;
2041static int test_open(struct inode *inode, struct file *file)
2042{
2043 file->private_data = inode->i_private;
2044 message_repeat = 1;
2045 return 0;
2046}
2047
2048/* send_packing TEST */
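/*
 * Each test is triggered from user space by writing the number of test cycles
 * to its debugfs node. Usage sketch (the exact path is an assumption and
 * depends on where the test-iosched debugfs root is created):
 *   echo 1 > /sys/kernel/debug/test-iosched/tests/send_write_packing_test
 */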
2049static ssize_t send_write_packing_test_write(struct file *file,
2050 const char __user *buf,
2051 size_t count,
2052 loff_t *ppos)
2053{
2054 int ret = 0;
2055 int i = 0;
2056 int number = -1;
2057 int j = 0;
2058
2059 test_pr_info("%s: -- send_write_packing TEST --", __func__);
2060
2061 sscanf(buf, "%d", &number);
2062
2063 if (number <= 0)
2064 number = 1;
2065
2066
2067 mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
2068
2069 if (validate_packed_commands_settings())
2070 return count;
2071
2072 if (mbtd->random_test_seed > 0)
2073 test_pr_info("%s: Test seed: %d", __func__,
2074 mbtd->random_test_seed);
2075
2076 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2077
2078 mbtd->test_info.data = mbtd;
2079 mbtd->test_info.prepare_test_fn = prepare_test;
2080 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2081 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2082 mbtd->test_info.post_test_fn = post_test;
2083
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002084 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002085 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2086 test_pr_info("%s: ====================", __func__);
2087
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002088 for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
2089 j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002090
2091 mbtd->test_info.testcase = j;
2092 mbtd->is_random = RANDOM_TEST;
2093 ret = test_iosched_start_test(&mbtd->test_info);
2094 if (ret)
2095 break;
2096 /* Allow FS requests to be dispatched */
2097 msleep(1000);
2098 mbtd->test_info.testcase = j;
2099 mbtd->is_random = NON_RANDOM_TEST;
2100 ret = test_iosched_start_test(&mbtd->test_info);
2101 if (ret)
2102 break;
2103 /* Allow FS requests to be dispatched */
2104 msleep(1000);
2105 }
2106 }
2107
2108 test_pr_info("%s: Completed all the test cases.", __func__);
2109
2110 return count;
2111}
2112
2113static ssize_t send_write_packing_test_read(struct file *file,
2114 char __user *buffer,
2115 size_t count,
2116 loff_t *offset)
2117{
2118 memset((void *)buffer, 0, count);
2119
2120 snprintf(buffer, count,
2121 "\nsend_write_packing_test\n"
2122 "=========\n"
2123 "Description:\n"
2124 "This test checks the following scenarios\n"
2125 "- Pack due to FLUSH message\n"
2126 "- Pack due to FLUSH after threshold writes\n"
2127 "- Pack due to READ message\n"
2128 "- Pack due to READ after threshold writes\n"
2129 "- Pack due to empty queue\n"
2130 "- Pack due to threshold writes\n"
2131 "- Pack due to one over threshold writes\n");
2132
2133 if (message_repeat == 1) {
2134 message_repeat = 0;
2135 return strnlen(buffer, count);
2136 } else {
2137 return 0;
2138 }
2139}
2140
2141const struct file_operations send_write_packing_test_ops = {
2142 .open = test_open,
2143 .write = send_write_packing_test_write,
2144 .read = send_write_packing_test_read,
2145};
2146
2147/* err_check TEST */
2148static ssize_t err_check_test_write(struct file *file,
2149 const char __user *buf,
2150 size_t count,
2151 loff_t *ppos)
2152{
2153 int ret = 0;
2154 int i = 0;
2155 int number = -1;
2156 int j = 0;
2157
2158 test_pr_info("%s: -- err_check TEST --", __func__);
2159
2160 sscanf(buf, "%d", &number);
2161
2162 if (number <= 0)
2163 number = 1;
2164
2165 mbtd->test_group = TEST_ERR_CHECK_GROUP;
2166
2167 if (validate_packed_commands_settings())
2168 return count;
2169
2170 if (mbtd->random_test_seed > 0)
2171 test_pr_info("%s: Test seed: %d", __func__,
2172 mbtd->random_test_seed);
2173
2174 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2175
2176 mbtd->test_info.data = mbtd;
2177 mbtd->test_info.prepare_test_fn = prepare_test;
2178 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2179 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2180 mbtd->test_info.post_test_fn = post_test;
2181
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002182 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002183 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2184 test_pr_info("%s: ====================", __func__);
2185
2186 for (j = ERR_CHECK_MIN_TESTCASE;
2187 j <= ERR_CHECK_MAX_TESTCASE ; j++) {
2188 mbtd->test_info.testcase = j;
2189 mbtd->is_random = RANDOM_TEST;
2190 ret = test_iosched_start_test(&mbtd->test_info);
2191 if (ret)
2192 break;
2193 /* Allow FS requests to be dispatched */
2194 msleep(1000);
2195 mbtd->test_info.testcase = j;
2196 mbtd->is_random = NON_RANDOM_TEST;
2197 ret = test_iosched_start_test(&mbtd->test_info);
2198 if (ret)
2199 break;
2200 /* Allow FS requests to be dispatched */
2201 msleep(1000);
2202 }
2203 }
2204
2205 test_pr_info("%s: Completed all the test cases.", __func__);
2206
2207 return count;
2208}
2209
2210static ssize_t err_check_test_read(struct file *file,
2211 char __user *buffer,
2212 size_t count,
2213 loff_t *offset)
2214{
2215 memset((void *)buffer, 0, count);
2216
2217 snprintf(buffer, count,
2218 "\nerr_check_TEST\n"
2219 "=========\n"
2220 "Description:\n"
2221 "This test checks the following scenarios\n"
2222 "- Return ABORT\n"
2223 "- Return PARTIAL followed by success\n"
2224 "- Return PARTIAL followed by abort\n"
2225 "- Return PARTIAL multiple times until success\n"
2226 "- Return PARTIAL with fail index = threshold\n"
2227 "- Return RETRY\n"
2228 "- Return CMD_ERR\n"
2229 "- Return DATA_ERR\n");
2230
2231 if (message_repeat == 1) {
2232 message_repeat = 0;
2233 return strnlen(buffer, count);
2234 } else {
2235 return 0;
2236 }
2237}
2238
2239const struct file_operations err_check_test_ops = {
2240 .open = test_open,
2241 .write = err_check_test_write,
2242 .read = err_check_test_read,
2243};
2244
2245/* send_invalid_packed TEST */
2246static ssize_t send_invalid_packed_test_write(struct file *file,
2247 const char __user *buf,
2248 size_t count,
2249 loff_t *ppos)
2250{
2251 int ret = 0;
2252 int i = 0;
2253 int number = -1;
2254 int j = 0;
2255 int num_of_failures = 0;
2256
2257 test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
2258
2259 sscanf(buf, "%d", &number);
2260
2261 if (number <= 0)
2262 number = 1;
2263
2264 mbtd->test_group = TEST_SEND_INVALID_GROUP;
2265
2266 if (validate_packed_commands_settings())
2267 return count;
2268
2269 if (mbtd->random_test_seed > 0)
2270 test_pr_info("%s: Test seed: %d", __func__,
2271 mbtd->random_test_seed);
2272
2273 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2274
2275 mbtd->test_info.data = mbtd;
2276 mbtd->test_info.prepare_test_fn = prepare_test;
2277 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2278 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2279 mbtd->test_info.post_test_fn = post_test;
2280
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002281 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002282 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2283 test_pr_info("%s: ====================", __func__);
2284
2285 for (j = INVALID_CMD_MIN_TESTCASE;
2286 j <= INVALID_CMD_MAX_TESTCASE ; j++) {
2287
2288 mbtd->test_info.testcase = j;
2289 mbtd->is_random = RANDOM_TEST;
2290 ret = test_iosched_start_test(&mbtd->test_info);
2291 if (ret)
2292 num_of_failures++;
2293 /* Allow FS requests to be dispatched */
2294 msleep(1000);
2295
2296 mbtd->test_info.testcase = j;
2297 mbtd->is_random = NON_RANDOM_TEST;
2298 ret = test_iosched_start_test(&mbtd->test_info);
2299 if (ret)
2300 num_of_failures++;
2301 /* Allow FS requests to be dispatched */
2302 msleep(1000);
2303 }
2304 }
2305
2306 test_pr_info("%s: Completed all the test cases.", __func__);
2307
2308 if (num_of_failures > 0) {
2309 test_iosched_set_test_result(TEST_FAILED);
2310 test_pr_err(
2311 "There were %d failures during the test, TEST FAILED",
2312 num_of_failures);
2313 }
2314 return count;
2315}
2316
2317static ssize_t send_invalid_packed_test_read(struct file *file,
2318 char __user *buffer,
2319 size_t count,
2320 loff_t *offset)
2321{
2322 memset((void *)buffer, 0, count);
2323
2324 snprintf(buffer, count,
2325 "\nsend_invalid_packed_TEST\n"
2326 "=========\n"
2327 "Description:\n"
2328 "This test checks the following scenarios\n"
2329 "- Send an invalid header version\n"
2330 "- Send the wrong write code\n"
2331 "- Send an invalid R/W code\n"
2332 "- Send wrong start address in header\n"
2333 "- Send header with block_count smaller than actual\n"
2334 "- Send header with block_count larger than actual\n"
2335 "- Send header CMD23 packed bit set\n"
2336 "- Send CMD23 with block count over threshold\n"
2337 "- Send CMD23 with block_count equals zero\n"
2338 "- Send CMD23 packed bit unset\n"
2339 "- Send CMD23 reliable write bit set\n"
2340 "- Send CMD23 bits [16-29] set\n"
2341 "- Send CMD23 header block not in block_count\n");
2342
2343 if (message_repeat == 1) {
2344 message_repeat = 0;
2345 return strnlen(buffer, count);
2346 } else {
2347 return 0;
2348 }
2349}
2350
2351const struct file_operations send_invalid_packed_test_ops = {
2352 .open = test_open,
2353 .write = send_invalid_packed_test_write,
2354 .read = send_invalid_packed_test_read,
2355};
2356
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002357/* packing_control TEST */
2358static ssize_t write_packing_control_test_write(struct file *file,
2359 const char __user *buf,
2360 size_t count,
2361 loff_t *ppos)
2362{
2363 int ret = 0;
2364 int i = 0;
2365 int number = -1;
2366 int j = 0;
2367 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
2368 int max_num_requests = mq->card->ext_csd.max_packed_writes;
2369 int test_successful = 1;
2370
2371 test_pr_info("%s: -- write_packing_control TEST --", __func__);
2372
2373 sscanf(buf, "%d", &number);
2374
2375 if (number <= 0)
2376 number = 1;
2377
2378 test_pr_info("%s: max_num_requests = %d ", __func__,
2379 max_num_requests);
2380
2381 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2382 mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
2383
2384 if (validate_packed_commands_settings())
2385 return count;
2386
2387 mbtd->test_info.data = mbtd;
2388 mbtd->test_info.prepare_test_fn = prepare_test;
2389 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2390 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2391
2392 for (i = 0; i < number; ++i) {
2393 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2394 test_pr_info("%s: ====================", __func__);
2395
2396 for (j = PACKING_CONTROL_MIN_TESTCASE;
2397 j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
2398
2399 test_successful = 1;
2400 mbtd->test_info.testcase = j;
2401 mbtd->is_random = RANDOM_TEST;
2402 ret = test_iosched_start_test(&mbtd->test_info);
2403 if (ret) {
2404 test_successful = 0;
2405 break;
2406 }
2407 /* Allow FS requests to be dispatched */
2408 msleep(1000);
2409
2410 mbtd->test_info.testcase = j;
2411 mbtd->is_random = NON_RANDOM_TEST;
2412 ret = test_iosched_start_test(&mbtd->test_info);
2413 if (ret) {
2414 test_successful = 0;
2415 break;
2416 }
2417 /* Allow FS requests to be dispatched */
2418 msleep(1000);
2419 }
2420
2421 if (!test_successful)
2422 break;
2423 }
2424
2425 test_pr_info("%s: Completed all the test cases.", __func__);
2426
2427 return count;
2428}
2429
2430static ssize_t write_packing_control_test_read(struct file *file,
2431 char __user *buffer,
2432 size_t count,
2433 loff_t *offset)
2434{
2435 memset((void *)buffer, 0, count);
2436
2437 snprintf(buffer, count,
2438 "\nwrite_packing_control_test\n"
2439 "=========\n"
2440 "Description:\n"
2441 "This test checks the following scenarios\n"
2442 "- Packing expected - one over trigger\n"
2443 "- Packing expected - N over trigger\n"
2444 "- Packing expected - N over trigger followed by read\n"
2445 "- Packing expected - N over trigger followed by flush\n"
2446 "- Packing expected - threshold over trigger FB by flush\n"
2447 "- Packing not expected - less than trigger\n"
2448 "- Packing not expected - trigger requests\n"
2449 "- Packing not expected - trigger, read, trigger\n"
2450 "- Mixed state - packing -> no packing -> packing\n"
2451 "- Mixed state - no packing -> packing -> no packing\n");
2452
2453 if (message_repeat == 1) {
2454 message_repeat = 0;
2455 return strnlen(buffer, count);
2456 } else {
2457 return 0;
2458 }
2459}
2460
2461const struct file_operations write_packing_control_test_ops = {
2462 .open = test_open,
2463 .write = write_packing_control_test_write,
2464 .read = write_packing_control_test_read,
2465};
2466
Maya Erezddc55732012-10-17 09:51:01 +02002467static ssize_t write_discard_sanitize_test_write(struct file *file,
2468 const char __user *buf,
2469 size_t count,
2470 loff_t *ppos)
2471{
2472 int ret = 0;
2473 int i = 0;
2474 int number = -1;
2475
2476 sscanf(buf, "%d", &number);
2477 if (number <= 0)
2478 number = 1;
2479
2480 test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);
2481
2482 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2483
2484 mbtd->test_group = TEST_GENERAL_GROUP;
2485
2486 mbtd->test_info.data = mbtd;
2487 mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
2488 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2489 mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;
2490
2491 for (i = 0 ; i < number ; ++i) {
2492 test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
2493 test_pr_info("%s: ===================", __func__);
2494
2495 mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
2496 ret = test_iosched_start_test(&mbtd->test_info);
2497
2498 if (ret)
2499 break;
2500 }
2501
2502 return count;
2503}
2504
2505const struct file_operations write_discard_sanitize_test_ops = {
2506 .open = test_open,
2507 .write = write_discard_sanitize_test_write,
2508};
2509
Yaniv Gardie9214c82012-10-18 13:58:18 +02002510static ssize_t bkops_test_write(struct file *file,
2511 const char __user *buf,
2512 size_t count,
2513 loff_t *ppos)
2514{
2515 int ret = 0;
2516 int i = 0, j;
2517 int number = -1;
2518
2519 test_pr_info("%s: -- bkops_test TEST --", __func__);
2520
2521 sscanf(buf, "%d", &number);
2522
2523 if (number <= 0)
2524 number = 1;
2525
2526 mbtd->test_group = TEST_BKOPS_GROUP;
2527
2528 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2529
2530 mbtd->test_info.data = mbtd;
2531 mbtd->test_info.prepare_test_fn = prepare_bkops;
2532 mbtd->test_info.check_test_result_fn = check_bkops_result;
2533 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2534 mbtd->test_info.run_test_fn = run_bkops;
2535 mbtd->test_info.timeout_msec = BKOPS_TEST_TIMEOUT;
2536 mbtd->test_info.post_test_fn = bkops_post_test;
2537
2538 for (i = 0 ; i < number ; ++i) {
2539 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2540 test_pr_info("%s: ===================", __func__);
2541 for (j = BKOPS_MIN_TESTCASE ;
2542 j <= BKOPS_MAX_TESTCASE ; j++) {
2543 mbtd->test_info.testcase = j;
2544 ret = test_iosched_start_test(&mbtd->test_info);
2545 if (ret)
2546 break;
2547 }
2548 }
2549
2550 test_pr_info("%s: Completed all the test cases.", __func__);
2551
2552 return count;
2553}
2554
2555static ssize_t bkops_test_read(struct file *file,
2556 char __user *buffer,
2557 size_t count,
2558 loff_t *offset)
2559{
2560 memset((void *)buffer, 0, count);
2561
2562 snprintf(buffer, count,
2563 "\nbkops_test\n========================\n"
2564 "Description:\n"
2565 "This test simulates BKOPS status from card\n"
2566 "and verifies that:\n"
2567 " - Starting BKOPS delayed work, level 1\n"
2568 " - Starting BKOPS delayed work, level 1, with HPI\n"
2569 " - Cancel starting BKOPS delayed work, "
2570 " when a request is received\n"
2571 " - Starting BKOPS urgent, level 2,3\n"
2572 " - Starting BKOPS urgent with 2 requests\n");
2573 return strnlen(buffer, count);
2574}
2575
2576const struct file_operations bkops_test_ops = {
2577 .open = test_open,
2578 .write = bkops_test_write,
2579 .read = bkops_test_read,
2580};
2581
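/*
 * Long sequential read throughput test: run the long read test case and
 * report the measured throughput in MiB/sec with one decimal digit.
 */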
Lee Susmanf18263a2012-10-24 14:14:37 +02002582static ssize_t long_sequential_read_test_write(struct file *file,
2583 const char __user *buf,
2584 size_t count,
2585 loff_t *ppos)
2586{
2587 int ret = 0;
2588 int i = 0;
2589 int number = -1;
2590 unsigned int mtime, integer, fraction;
2591
2592 test_pr_info("%s: -- Long Sequential Read TEST --", __func__);
2593
2594 sscanf(buf, "%d", &number);
2595
2596 if (number <= 0)
2597 number = 1;
2598
2599 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2600 mbtd->test_group = TEST_GENERAL_GROUP;
2601
2602 mbtd->test_info.data = mbtd;
2603 mbtd->test_info.prepare_test_fn = prepare_test;
2604 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2605
2606 for (i = 0 ; i < number ; ++i) {
2607 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2608 test_pr_info("%s: ====================", __func__);
2609
2610 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_READ;
2611 mbtd->is_random = NON_RANDOM_TEST;
2612 ret = test_iosched_start_test(&mbtd->test_info);
2613 if (ret)
2614 break;
2615
2616 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2617
2618 test_pr_info("%s: time is %u msec, size is %u.%u MiB",
2619 __func__, mtime, LONG_TEST_SIZE_INTEGER,
2620 LONG_TEST_SIZE_FRACTION);
2621
2622 /* we first multiply in order not to lose precision */
2623 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2624 /* divide values to get a MiB/sec integer value with one
2625		   digit of precision (hence the multiplication by 10 below)
2626 */
2627 fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
2628 integer /= 10;
2629 /* and calculate the MiB value fraction */
2630 fraction -= integer * 10;
2631
2632 test_pr_info("%s: Throughput: %u.%u MiB/sec\n"
2633 , __func__, integer, fraction);
2634
2635 /* Allow FS requests to be dispatched */
2636 msleep(1000);
2637 }
2638
2639 return count;
2640}
2641
2642static ssize_t long_sequential_read_test_read(struct file *file,
2643 char __user *buffer,
2644 size_t count,
2645 loff_t *offset)
2646{
2647 memset((void *)buffer, 0, count);
2648
2649 snprintf(buffer, count,
2650 "\nlong_sequential_read_test\n"
2651 "=========\n"
2652 "Description:\n"
2653 "This test runs the following scenarios\n"
2654 "- Long Sequential Read Test: this test measures read "
2655 "throughput at the driver level by sequentially reading many "
2656 "large requests.\n");
2657
2658 if (message_repeat == 1) {
2659 message_repeat = 0;
2660 return strnlen(buffer, count);
2661 } else
2662 return 0;
2663}
2664
2665const struct file_operations long_sequential_read_test_ops = {
2666 .open = test_open,
2667 .write = long_sequential_read_test_write,
2668 .read = long_sequential_read_test_read,
2669};
2670
Lee Susmana35ae6e2012-10-25 16:06:07 +02002671static ssize_t long_sequential_write_test_write(struct file *file,
2672 const char __user *buf,
2673 size_t count,
2674 loff_t *ppos)
2675{
2676 int ret = 0;
2677 int i = 0;
2678 int number = -1;
2679 unsigned int mtime, integer, fraction;
2680
2681 test_pr_info("%s: -- Long Sequential Write TEST --", __func__);
2682
2683 sscanf(buf, "%d", &number);
2684
2685 if (number <= 0)
2686 number = 1;
2687
2688 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2689 mbtd->test_group = TEST_GENERAL_GROUP;
2690
2691 mbtd->test_info.data = mbtd;
2692 mbtd->test_info.prepare_test_fn = prepare_test;
2693 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2694
2695 for (i = 0 ; i < number ; ++i) {
2696 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2697 test_pr_info("%s: ====================", __func__);
2698
2699 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_WRITE;
2700 mbtd->is_random = NON_RANDOM_TEST;
2701 ret = test_iosched_start_test(&mbtd->test_info);
2702 if (ret)
2703 break;
2704
2705 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2706
2707 test_pr_info("%s: time is %u msec, size is %u.%u MiB",
2708 __func__, mtime, LONG_TEST_SIZE_INTEGER,
2709 LONG_TEST_SIZE_FRACTION);
2710
2711 /* we first multiply in order not to lose precision */
2712 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2713 /* divide values to get a MiB/sec integer value with one
2714 digit of precision
2715 */
2716 fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
2717 integer /= 10;
2718 /* and calculate the MiB value fraction */
2719 fraction -= integer * 10;
2720
2721 test_pr_info("%s: Throughput: %u.%u MiB/sec\n",
2722 __func__, integer, fraction);
2723
2724 /* Allow FS requests to be dispatched */
2725 msleep(1000);
2726 }
2727
2728 return count;
2729}
2730
2731static ssize_t long_sequential_write_test_read(struct file *file,
2732 char __user *buffer,
2733 size_t count,
2734 loff_t *offset)
2735{
2736 memset((void *)buffer, 0, count);
2737
2738 snprintf(buffer, count,
2739 "\nlong_sequential_write_test\n"
2740 "=========\n"
2741 "Description:\n"
2742 "This test runs the following scenarios\n"
2743 "- Long Sequential Write Test: this test measures write "
2744 "throughput at the driver level by sequentially writing many "
2745 "large requests\n");
2746
2747 if (message_repeat == 1) {
2748 message_repeat = 0;
2749 return strnlen(buffer, count);
2750 } else
2751 return 0;
2752}
2753
2754const struct file_operations long_sequential_write_test_ops = {
2755 .open = test_open,
2756 .write = long_sequential_write_test_write,
2757 .read = long_sequential_write_test_read,
2758};
2759
2760
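/* Remove all the debugfs entries created by mmc_block_test_debugfs_init() */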
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002761static void mmc_block_test_debugfs_cleanup(void)
2762{
2763 debugfs_remove(mbtd->debug.random_test_seed);
2764 debugfs_remove(mbtd->debug.send_write_packing_test);
2765 debugfs_remove(mbtd->debug.err_check_test);
2766 debugfs_remove(mbtd->debug.send_invalid_packed_test);
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002767 debugfs_remove(mbtd->debug.packing_control_test);
Maya Erezddc55732012-10-17 09:51:01 +02002768 debugfs_remove(mbtd->debug.discard_sanitize_test);
Yaniv Gardie9214c82012-10-18 13:58:18 +02002769 debugfs_remove(mbtd->debug.bkops_test);
Lee Susmanf18263a2012-10-24 14:14:37 +02002770 debugfs_remove(mbtd->debug.long_sequential_read_test);
Lee Susmana35ae6e2012-10-25 16:06:07 +02002771 debugfs_remove(mbtd->debug.long_sequential_write_test);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002772}
2773
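/*
 * Create the random_test_seed utility node and one debugfs file per test
 * under the test-iosched utils and tests directories.
 */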
2774static int mmc_block_test_debugfs_init(void)
2775{
2776 struct dentry *utils_root, *tests_root;
2777
2778 utils_root = test_iosched_get_debugfs_utils_root();
2779 tests_root = test_iosched_get_debugfs_tests_root();
2780
2781 if (!utils_root || !tests_root)
2782 return -EINVAL;
2783
2784 mbtd->debug.random_test_seed = debugfs_create_u32(
2785 "random_test_seed",
2786 S_IRUGO | S_IWUGO,
2787 utils_root,
2788 &mbtd->random_test_seed);
2789
2790 if (!mbtd->debug.random_test_seed)
2791 goto err_nomem;
2792
2793 mbtd->debug.send_write_packing_test =
2794 debugfs_create_file("send_write_packing_test",
2795 S_IRUGO | S_IWUGO,
2796 tests_root,
2797 NULL,
2798 &send_write_packing_test_ops);
2799
2800 if (!mbtd->debug.send_write_packing_test)
2801 goto err_nomem;
2802
2803 mbtd->debug.err_check_test =
2804 debugfs_create_file("err_check_test",
2805 S_IRUGO | S_IWUGO,
2806 tests_root,
2807 NULL,
2808 &err_check_test_ops);
2809
2810 if (!mbtd->debug.err_check_test)
2811 goto err_nomem;
2812
2813 mbtd->debug.send_invalid_packed_test =
2814 debugfs_create_file("send_invalid_packed_test",
2815 S_IRUGO | S_IWUGO,
2816 tests_root,
2817 NULL,
2818 &send_invalid_packed_test_ops);
2819
2820 if (!mbtd->debug.send_invalid_packed_test)
2821 goto err_nomem;
2822
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002823 mbtd->debug.packing_control_test = debugfs_create_file(
2824 "packing_control_test",
2825 S_IRUGO | S_IWUGO,
2826 tests_root,
2827 NULL,
2828 &write_packing_control_test_ops);
2829
2830 if (!mbtd->debug.packing_control_test)
2831 goto err_nomem;
2832
Maya Erezddc55732012-10-17 09:51:01 +02002833 mbtd->debug.discard_sanitize_test =
2834 debugfs_create_file("write_discard_sanitize_test",
2835 S_IRUGO | S_IWUGO,
2836 tests_root,
2837 NULL,
2838 &write_discard_sanitize_test_ops);
2839 if (!mbtd->debug.discard_sanitize_test) {
2840 mmc_block_test_debugfs_cleanup();
2841 return -ENOMEM;
2842 }
2843
Yaniv Gardie9214c82012-10-18 13:58:18 +02002844 mbtd->debug.bkops_test =
2845 debugfs_create_file("bkops_test",
2846 S_IRUGO | S_IWUGO,
2847 tests_root,
2848 NULL,
2849 &bkops_test_ops);
2850
2851 if (!mbtd->debug.bkops_test)
2852 goto err_nomem;
2853
Lee Susmanf18263a2012-10-24 14:14:37 +02002854 mbtd->debug.long_sequential_read_test = debugfs_create_file(
2855 "long_sequential_read_test",
2856 S_IRUGO | S_IWUGO,
2857 tests_root,
2858 NULL,
2859 &long_sequential_read_test_ops);
2860
2861 if (!mbtd->debug.long_sequential_read_test)
2862 goto err_nomem;
2863
Lee Susmana35ae6e2012-10-25 16:06:07 +02002864 mbtd->debug.long_sequential_write_test = debugfs_create_file(
2865 "long_sequential_write_test",
2866 S_IRUGO | S_IWUGO,
2867 tests_root,
2868 NULL,
2869 &long_sequential_write_test_ops);
2870
2871 if (!mbtd->debug.long_sequential_write_test)
2872 goto err_nomem;
2873
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002874 return 0;
2875
2876err_nomem:
2877 mmc_block_test_debugfs_cleanup();
2878 return -ENOMEM;
2879}
2880
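/*
 * Called when test-iosched attaches to the block device's request queue:
 * allocate the expected packing statistics array and create the debugfs
 * entries.
 */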
2881static void mmc_block_test_probe(void)
2882{
2883 struct request_queue *q = test_iosched_get_req_queue();
2884 struct mmc_queue *mq;
2885 int max_packed_reqs;
2886
2887 if (!q) {
2888 test_pr_err("%s: NULL request queue", __func__);
2889 return;
2890 }
2891
2892 mq = q->queuedata;
2893 if (!mq) {
2894 test_pr_err("%s: NULL mq", __func__);
2895 return;
2896 }
2897
2898 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
2899 mbtd->exp_packed_stats.packing_events =
2900 kzalloc((max_packed_reqs + 1) *
2901 sizeof(*mbtd->exp_packed_stats.packing_events),
2902 GFP_KERNEL);
2903
2904 mmc_block_test_debugfs_init();
2905}
2906
2907static void mmc_block_test_remove(void)
2908{
2909 mmc_block_test_debugfs_cleanup();
2910}
2911
2912static int __init mmc_block_test_init(void)
2913{
2914 mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
2915 if (!mbtd) {
2916 test_pr_err("%s: failed to allocate mmc_block_test_data",
2917 __func__);
2918 return -ENODEV;
2919 }
2920
Yaniv Gardie9214c82012-10-18 13:58:18 +02002921 init_waitqueue_head(&mbtd->bkops_wait_q);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002922 mbtd->bdt.init_fn = mmc_block_test_probe;
2923 mbtd->bdt.exit_fn = mmc_block_test_remove;
2924 INIT_LIST_HEAD(&mbtd->bdt.list);
2925 test_iosched_register(&mbtd->bdt);
2926
2927 return 0;
2928}
2929
2930static void __exit mmc_block_test_exit(void)
2931{
2932 test_iosched_unregister(&mbtd->bdt);
2933 kfree(mbtd);
2934}
2935
2936module_init(mmc_block_test_init);
2937module_exit(mmc_block_test_exit);
2938
2939MODULE_LICENSE("GPL v2");
2940MODULE_DESCRIPTION("MMC block test");