blob: 7a4d19e37bcebcf05e80ee51c7178a7776cf145d [file] [log] [blame]
Lee Susman70160bb2013-01-06 10:57:30 +02001/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/* MMC block test */
15
16#include <linux/module.h>
17#include <linux/blkdev.h>
18#include <linux/debugfs.h>
19#include <linux/mmc/card.h>
20#include <linux/mmc/host.h>
21#include <linux/delay.h>
22#include <linux/test-iosched.h>
Lee Susmanf18263a2012-10-24 14:14:37 +020023#include <linux/jiffies.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020024#include "queue.h"
Yaniv Gardie9214c82012-10-18 13:58:18 +020025#include <linux/mmc/mmc.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020026
#define MODULE_NAME "mmc_block_test"
#define TEST_MAX_SECTOR_RANGE (600*1024*1024) /* 600 MB */
#define TEST_MAX_BIOS_PER_REQ 128
#define CMD23_PACKED_BIT (1 << 30)
#define LARGE_PRIME_1 1103515367
#define LARGE_PRIME_2 35757
#define PACKED_HDR_VER_MASK 0x000000FF
#define PACKED_HDR_RW_MASK 0x0000FF00
#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
#define SECTOR_SIZE 512
#define NUM_OF_SECTORS_PER_BIO ((BIO_U32_SIZE * 4) / SECTOR_SIZE)
/* Argument parenthesized so expression arguments (e.g. i + 1) stay correct */
#define BIO_TO_SECTOR(x) ((x) * NUM_OF_SECTORS_PER_BIO)
/* the desired long test size to be read */
#define LONG_READ_TEST_MAX_NUM_BYTES (50*1024*1024) /* 50MB */
/* the minimum amount of requests that will be created */
#define LONG_WRITE_TEST_MIN_NUM_REQS 200 /* 100MB */
/* request queue limitation is 128 requests, and we leave 10 spare requests */
#define TEST_MAX_REQUESTS 118
#define LONG_READ_TEST_MAX_NUM_REQS (LONG_READ_TEST_MAX_NUM_BYTES / \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* this doesn't allow the test requests num to be greater than the maximum */
#define LONG_READ_TEST_ACTUAL_NUM_REQS  \
		((TEST_MAX_REQUESTS < LONG_READ_TEST_MAX_NUM_REQS) ? \
			TEST_MAX_REQUESTS : LONG_READ_TEST_MAX_NUM_REQS)
#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
/* actual number of bytes in test */
#define LONG_READ_NUM_BYTES (LONG_READ_TEST_ACTUAL_NUM_REQS * \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/*
 * actual number of MiB in test multiplied by 10, for single digit precision.
 * Argument parenthesized: without it, BYTE_TO_MB_x_10(a + b) would expand
 * to (a + b * 10) and silently compute the wrong size.
 */
#define BYTE_TO_MB_x_10(x) (((x) * 10) / (1024 * 1024))
/* extract integer value */
#define LONG_TEST_SIZE_INTEGER(x) (BYTE_TO_MB_x_10(x) / 10)
/*
 * and calculate the MiB value fraction.
 * NOTE: evaluates its argument more than once - pass side-effect-free
 * expressions only.
 */
#define LONG_TEST_SIZE_FRACTION(x) (BYTE_TO_MB_x_10(x) - \
		(LONG_TEST_SIZE_INTEGER(x) * 10))
#define LONG_WRITE_TEST_SLEEP_TIME_MS 5

/* Module-tagged logging helpers (prefix every message with MODULE_NAME) */
#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)

#define SANITIZE_TEST_TIMEOUT 240000
#define NEW_REQ_TEST_SLEEP_TIME 1
#define NEW_REQ_TEST_NUM_BIOS 64
#define TEST_REQUEST_NUM_OF_BIOS	3

/*
 * True when any of the observed BKOPs statistics differs from the expected
 * value. Arguments parenthesized; evaluates each argument once.
 */
#define CHECK_BKOPS_STATS(stats, exp_bkops, exp_hpi, exp_suspend)	\
				   (((stats).bkops != (exp_bkops)) ||	\
				    ((stats).hpi != (exp_hpi)) ||	\
				    ((stats).suspend != (exp_suspend)))
#define BKOPS_TEST_TIMEOUT 60000
Maya Erezddc55732012-10-17 09:51:01 +020079
/* Selects whether a testcase uses fixed or seeded-random request sizing. */
enum is_random {
	NON_RANDOM_TEST,
	RANDOM_TEST,
};
84
/*
 * All testcases implemented by this module. Each test group is delimited by
 * a MIN/MAX alias pair so group membership can be checked with a range test.
 */
enum mmc_block_test_testcases {
	/* Start of send write packing test group */
	SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_FLUSH,
	TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_EMPTY_QUEUE,
	TEST_STOP_DUE_TO_MAX_REQ_NUM,
	TEST_STOP_DUE_TO_THRESHOLD,
	SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,

	/* Start of err check test group */
	ERR_CHECK_MIN_TESTCASE,
	TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
	TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
	TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
	TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
	TEST_RET_PARTIAL_MAX_FAIL_IDX,
	TEST_RET_RETRY,
	TEST_RET_CMD_ERR,
	TEST_RET_DATA_ERR,
	ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,

	/* Start of send invalid test group */
	INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_WRONG_WRITE_CODE,
	TEST_HDR_INVALID_RW_CODE,
	TEST_HDR_DIFFERENT_ADDRESSES,
	TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
	TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
	TEST_HDR_CMD23_PACKED_BIT_SET,
	TEST_CMD23_MAX_PACKED_WRITES,
	TEST_CMD23_ZERO_PACKED_WRITES,
	TEST_CMD23_PACKED_BIT_UNSET,
	TEST_CMD23_REL_WR_BIT_SET,
	TEST_CMD23_BITS_16TO29_SET,
	TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
	INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,

	/*
	 * Start of packing control test group.
	 * in these next testcases the abbreviation FB = followed by
	 */
	PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
		PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_N_OVER_TRIGGER,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
	TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
	TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
	TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
	TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
	TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
	PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,

	TEST_WRITE_DISCARD_SANITIZE_READ,

	/* Start of bkops test group */
	BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1 = BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1_HPI,
	BKOPS_CANCEL_DELAYED_WORK,
	BKOPS_URGENT_LEVEL_2,
	BKOPS_URGENT_LEVEL_2_TWO_REQS,
	BKOPS_URGENT_LEVEL_3,
	BKOPS_MAX_TESTCASE = BKOPS_URGENT_LEVEL_3,

	TEST_LONG_SEQUENTIAL_READ,
	TEST_LONG_SEQUENTIAL_WRITE,

	TEST_NEW_REQ_NOTIFICATION,
};
162
/* Coarse grouping of the testcases above; stored in mbtd->test_group. */
enum mmc_block_test_group {
	TEST_NO_GROUP,
	TEST_GENERAL_GROUP,
	TEST_SEND_WRITE_PACKING_GROUP,
	TEST_ERR_CHECK_GROUP,
	TEST_SEND_INVALID_GROUP,
	TEST_PACKING_CONTROL_GROUP,
	TEST_BKOPS_GROUP,
	TEST_NEW_NOTIFICATION_GROUP,
};
173
/* Progress markers for the multi-step BKOPs tests (see bkops_stage field). */
enum bkops_test_stages {
	BKOPS_STAGE_1,
	BKOPS_STAGE_2,
	BKOPS_STAGE_3,
	BKOPS_STAGE_4,
};
180
/* debugfs entries exposed by this module, one per user-triggerable test. */
struct mmc_block_test_debug {
	struct dentry *send_write_packing_test;
	struct dentry *err_check_test;
	struct dentry *send_invalid_packed_test;
	/* seed input for tests that randomize request/bio counts */
	struct dentry *random_test_seed;
	struct dentry *packing_control_test;
	struct dentry *discard_sanitize_test;
	struct dentry *bkops_test;
	struct dentry *long_sequential_read_test;
	struct dentry *long_sequential_write_test;
	struct dentry *new_req_notification_test;
};
193
/* Module-wide state shared by all testcases. */
struct mmc_block_test_data {
	/* The number of write requests that the test will issue */
	int num_requests;
	/* The expected write packing statistics for the current test */
	struct mmc_wr_pack_stats exp_packed_stats;
	/*
	 * A user-defined seed for random choices of number of bios written in
	 * a request, and of number of requests issued in a test
	 * This field is randomly updated after each use
	 */
	unsigned int random_test_seed;
	/* A retry counter used in err_check tests */
	int err_check_counter;
	/* Can be one of the values of enum test_group */
	enum mmc_block_test_group test_group;
	/*
	 * Indicates if the current testcase is running with random values of
	 * num_requests and num_bios (in each request)
	 */
	int is_random;
	/* Data structure for debugfs dentrys */
	struct mmc_block_test_debug debug;
	/*
	 * Data structure containing individual test information, including
	 * self-defined specific data
	 */
	struct test_info test_info;
	/* mmc block device test */
	struct blk_dev_test_type bdt;
	/* Current BKOPs test stage */
	enum bkops_test_stages bkops_stage;
	/* A wait queue for BKOPs tests */
	wait_queue_head_t bkops_wait_q;
	/* A counter for the number of test requests completed */
	unsigned int completed_req_count;
};
230
/* Global test context; presumably allocated in module init - not visible in this chunk */
static struct mmc_block_test_data *mbtd;
232
Lee Susmane868f8a2012-11-04 15:04:41 +0200233void print_mmc_packing_stats(struct mmc_card *card)
234{
235 int i;
236 int max_num_of_packed_reqs = 0;
237
238 if ((!card) || (!card->wr_pack_stats.packing_events))
239 return;
240
241 max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
242
243 spin_lock(&card->wr_pack_stats.lock);
244
245 pr_info("%s: write packing statistics:\n",
246 mmc_hostname(card->host));
247
248 for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
249 if (card->wr_pack_stats.packing_events[i] != 0)
250 pr_info("%s: Packed %d reqs - %d times\n",
251 mmc_hostname(card->host), i,
252 card->wr_pack_stats.packing_events[i]);
253 }
254
255 pr_info("%s: stopped packing due to the following reasons:\n",
256 mmc_hostname(card->host));
257
258 if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
259 pr_info("%s: %d times: exceedmax num of segments\n",
260 mmc_hostname(card->host),
261 card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
262 if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
263 pr_info("%s: %d times: exceeding the max num of sectors\n",
264 mmc_hostname(card->host),
265 card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
266 if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
267 pr_info("%s: %d times: wrong data direction\n",
268 mmc_hostname(card->host),
269 card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
270 if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
271 pr_info("%s: %d times: flush or discard\n",
272 mmc_hostname(card->host),
273 card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
274 if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
275 pr_info("%s: %d times: empty queue\n",
276 mmc_hostname(card->host),
277 card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
278 if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
279 pr_info("%s: %d times: rel write\n",
280 mmc_hostname(card->host),
281 card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
282 if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
283 pr_info("%s: %d times: Threshold\n",
284 mmc_hostname(card->host),
285 card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
286
287 spin_unlock(&card->wr_pack_stats.lock);
288}
289
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200290/*
291 * A callback assigned to the packed_test_fn field.
292 * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
293 * Here we alter the packed header or CMD23 in order to send an invalid
294 * packed command to the card.
295 */
/*
 * Corrupt one field of the packed-write header (or of the CMD23 set-block-
 * count command) according to the current testcase, so an invalid packed
 * command is sent to the card. Which field is corrupted is selected by
 * mbtd->test_info.testcase; all other requests pass through unchanged.
 */
static void test_invalid_packed_cmd(struct request_queue *q,
				struct mmc_queue_req *mqrq)
{
	struct mmc_queue *mq = q->queuedata;
	u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
	struct request *req = mqrq->req;
	struct request *second_rq;
	struct test_request *test_rq;
	struct mmc_blk_request *brq = &mqrq->brq;
	int num_requests;
	int max_packed_reqs;

	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return;
	}

	test_rq = (struct test_request *)req->elv.priv[0];
	if (!test_rq) {
		test_pr_err("%s: NULL test_rq", __func__);
		return;
	}
	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	switch (mbtd->test_info.testcase) {
	case TEST_HDR_INVALID_VERSION:
		test_pr_info("%s: set invalid header version", __func__);
		/* Put 0 in header version field (1 byte, offset 0 in header) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
		break;
	case TEST_HDR_WRONG_WRITE_CODE:
		test_pr_info("%s: wrong write code", __func__);
		/* Set R/W field with R value (1 byte, offset 1 in header) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
		break;
	case TEST_HDR_INVALID_RW_CODE:
		test_pr_info("%s: invalid r/w code", __func__);
		/* Set R/W field with invalid value */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
		break;
	case TEST_HDR_DIFFERENT_ADDRESSES:
		test_pr_info("%s: different addresses", __func__);
		second_rq = list_entry(req->queuelist.next, struct request,
				queuelist);
		test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
			      __func__, (long)req->__sector,
			     (long)second_rq->__sector);
		/*
		 * Put start sector of second write request in the first write
		 * request's cmd25 argument in the packed header.
		 * NOTE(review): sector_t may be 64-bit; the store into a u32
		 * header word truncates - presumably acceptable for the test
		 * sector range, but confirm.
		 */
		packed_cmd_hdr[3] = second_rq->__sector;
		break;
	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
		test_pr_info("%s: request num smaller than actual" , __func__);
		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
									>> 16;
		/* num of entries is decremented by 1 */
		num_requests = (num_requests - 1) << 16;
		/*
		 * Set number of requests field in packed write header to be
		 * smaller than the actual number (1 byte, offset 2 in header)
		 */
		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
		break;
	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
		test_pr_info("%s: request num larger than actual" , __func__);
		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
									>> 16;
		/* num of entries is incremented by 1 */
		num_requests = (num_requests + 1) << 16;
		/*
		 * Set number of requests field in packed write header to be
		 * larger than the actual number (1 byte, offset 2 in header).
		 */
		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
		break;
	case TEST_HDR_CMD23_PACKED_BIT_SET:
		test_pr_info("%s: header CMD23 packed bit set" , __func__);
		/*
		 * Set packed bit (bit 30) in cmd23 argument of first and second
		 * write requests in packed write header.
		 * These are located at bytes 2 and 4 in packed write header
		 */
		packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
		packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
		break;
	case TEST_CMD23_MAX_PACKED_WRITES:
		test_pr_info("%s: CMD23 request num > max_packed_reqs",
			      __func__);
		/*
		 * Set the individual packed cmd23 request num to
		 * max_packed_reqs + 1
		 */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
		break;
	case TEST_CMD23_ZERO_PACKED_WRITES:
		test_pr_info("%s: CMD23 request num = 0", __func__);
		/* Set the individual packed cmd23 request num to zero */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED;
		break;
	case TEST_CMD23_PACKED_BIT_UNSET:
		test_pr_info("%s: CMD23 packed bit unset", __func__);
		/*
		 * Set the individual packed cmd23 packed bit to 0,
		 *  although there is a packed write request
		 */
		brq->sbc.arg &= ~CMD23_PACKED_BIT;
		break;
	case TEST_CMD23_REL_WR_BIT_SET:
		test_pr_info("%s: CMD23 REL WR bit set", __func__);
		/* Set the individual packed cmd23 reliable write bit */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
		break;
	case TEST_CMD23_BITS_16TO29_SET:
		test_pr_info("%s: CMD23 bits [16-29] set", __func__);
		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
			PACKED_HDR_BITS_16_TO_29_SET;
		break;
	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
		test_pr_info("%s: CMD23 hdr not in block count", __func__);
		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
		((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
		break;
	}
}
430
431/*
432 * A callback assigned to the err_check_fn field of the mmc_request by the
433 * MMC/card/block layer.
434 * Called upon request completion by the MMC/core layer.
435 * Here we emulate an error return value from the card.
436 */
/*
 * A callback assigned to the err_check_fn field of the mmc_request by the
 * MMC/card/block layer; called upon request completion by the MMC/core layer.
 * Emulates an error return value from the card according to the current
 * testcase. Uses mbtd->err_check_counter to make some testcases fail a fixed
 * number of times before succeeding; several cases clear mq->err_check_fn so
 * the injection happens only once. Returns 0 (success) on any sanity failure.
 */
static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request_queue *req_q = test_iosched_get_req_queue();
	struct mmc_queue *mq;
	int max_packed_reqs;
	int ret = 0;
	struct mmc_blk_request *brq;

	if (req_q)
		mq = req_q->queuedata;
	else {
		test_pr_err("%s: NULL request_queue", __func__);
		return 0;
	}

	if (!mq) {
		test_pr_err("%s: %s: NULL mq", __func__,
			mmc_hostname(card->host));
		return 0;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	if (!mq_rq) {
		test_pr_err("%s: %s: NULL mq_rq", __func__,
			mmc_hostname(card->host));
		return 0;
	}
	brq = &mq_rq->brq;

	switch (mbtd->test_info.testcase) {
	case TEST_RET_ABORT:
		test_pr_info("%s: return abort", __func__);
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
		test_pr_info("%s: return partial followed by success",
			      __func__);
		/*
		 * Since in this testcase num_requests is always >= 2,
		 * we can be sure that packed_fail_idx is always >= 1
		 */
		mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
		test_pr_info("%s: packed_fail_idx = %d"
			, __func__, mq_rq->packed_fail_idx);
		/* fail only once: remove the callback after this injection */
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
		if (!mbtd->err_check_counter) {
			test_pr_info("%s: return partial followed by abort",
				      __func__);
			mbtd->err_check_counter++;
			/*
			 * Since in this testcase num_requests is always >= 3,
			 * we have that packed_fail_idx is always >= 1
			 */
			mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
			test_pr_info("%s: packed_fail_idx = %d"
				, __func__, mq_rq->packed_fail_idx);
			ret = MMC_BLK_PARTIAL;
			break;
		}
		/* second invocation: reset the counter and abort */
		mbtd->err_check_counter = 0;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
		test_pr_info("%s: return partial multiple until success",
			     __func__);
		if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
			/* injected enough partials; stop and reset */
			mq->err_check_fn = NULL;
			mbtd->err_check_counter = 0;
			ret = MMC_BLK_PARTIAL;
			break;
		}
		mq_rq->packed_fail_idx = 1;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
		test_pr_info("%s: return partial max fail_idx", __func__);
		mq_rq->packed_fail_idx = max_packed_reqs - 1;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_RETRY:
		test_pr_info("%s: return retry", __func__);
		ret = MMC_BLK_RETRY;
		break;
	case TEST_RET_CMD_ERR:
		test_pr_info("%s: return cmd err", __func__);
		ret = MMC_BLK_CMD_ERR;
		break;
	case TEST_RET_DATA_ERR:
		test_pr_info("%s: return data err", __func__);
		ret = MMC_BLK_DATA_ERR;
		break;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* simulate a card exception only on the first completion */
		if (mbtd->err_check_counter++ == 0) {
			test_pr_info("%s: simulate an exception from the card",
				     __func__);
			brq->cmd.resp[0] |= R1_EXCEPTION_EVENT;
		}
		mq->err_check_fn = NULL;
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
	}

	return ret;
}
553
554/*
555 * This is a specific implementation for the get_test_case_str_fn function
556 * pointer in the test_info data structure. Given a valid test_data instance,
557 * the function returns a string resembling the test name, based on the testcase
558 */
559static char *get_test_case_str(struct test_data *td)
560{
561 if (!td) {
562 test_pr_err("%s: NULL td", __func__);
563 return NULL;
564 }
565
Lee Susman039ce092012-11-15 13:36:15 +0200566switch (td->test_info.testcase) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200567 case TEST_STOP_DUE_TO_FLUSH:
Lee Susman039ce092012-11-15 13:36:15 +0200568 return "\"stop due to flush\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200569 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200570 return "\"stop due to flush after max-1 reqs\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200571 case TEST_STOP_DUE_TO_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200572 return "\"stop due to read\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200573 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200574 return "\"stop due to read after max-1 reqs\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200575 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
Lee Susman039ce092012-11-15 13:36:15 +0200576 return "\"stop due to empty queue\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200577 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
Lee Susman039ce092012-11-15 13:36:15 +0200578 return "\"stop due to max req num\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200579 case TEST_STOP_DUE_TO_THRESHOLD:
Lee Susman039ce092012-11-15 13:36:15 +0200580 return "\"stop due to exceeding threshold\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200581 case TEST_RET_ABORT:
Lee Susman039ce092012-11-15 13:36:15 +0200582 return "\"err_check return abort\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200583 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
Lee Susman039ce092012-11-15 13:36:15 +0200584 return "\"err_check return partial followed by success\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200585 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
Lee Susman039ce092012-11-15 13:36:15 +0200586 return "\"err_check return partial followed by abort\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200587 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
Lee Susman039ce092012-11-15 13:36:15 +0200588 return "\"err_check return partial multiple until success\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200589 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
Lee Susman039ce092012-11-15 13:36:15 +0200590 return "\"err_check return partial max fail index\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200591 case TEST_RET_RETRY:
Lee Susman039ce092012-11-15 13:36:15 +0200592 return "\"err_check return retry\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200593 case TEST_RET_CMD_ERR:
Lee Susman039ce092012-11-15 13:36:15 +0200594 return "\"err_check return cmd error\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200595 case TEST_RET_DATA_ERR:
Lee Susman039ce092012-11-15 13:36:15 +0200596 return "\"err_check return data error\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200597 case TEST_HDR_INVALID_VERSION:
Lee Susman039ce092012-11-15 13:36:15 +0200598 return "\"invalid - wrong header version\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200599 case TEST_HDR_WRONG_WRITE_CODE:
Lee Susman039ce092012-11-15 13:36:15 +0200600 return "\"invalid - wrong write code\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200601 case TEST_HDR_INVALID_RW_CODE:
Lee Susman039ce092012-11-15 13:36:15 +0200602 return "\"invalid - wrong R/W code\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200603 case TEST_HDR_DIFFERENT_ADDRESSES:
Lee Susman039ce092012-11-15 13:36:15 +0200604 return "\"invalid - header different addresses\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200605 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
Lee Susman039ce092012-11-15 13:36:15 +0200606 return "\"invalid - header req num smaller than actual\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200607 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
Lee Susman039ce092012-11-15 13:36:15 +0200608 return "\"invalid - header req num larger than actual\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200609 case TEST_HDR_CMD23_PACKED_BIT_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200610 return "\"invalid - header cmd23 packed bit set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200611 case TEST_CMD23_MAX_PACKED_WRITES:
Lee Susman039ce092012-11-15 13:36:15 +0200612 return "\"invalid - cmd23 max packed writes\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200613 case TEST_CMD23_ZERO_PACKED_WRITES:
Lee Susman039ce092012-11-15 13:36:15 +0200614 return "\"invalid - cmd23 zero packed writes\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200615 case TEST_CMD23_PACKED_BIT_UNSET:
Lee Susman039ce092012-11-15 13:36:15 +0200616 return "\"invalid - cmd23 packed bit unset\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200617 case TEST_CMD23_REL_WR_BIT_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200618 return "\"invalid - cmd23 rel wr bit set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200619 case TEST_CMD23_BITS_16TO29_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200620 return "\"invalid - cmd23 bits [16-29] set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200621 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
Lee Susman039ce092012-11-15 13:36:15 +0200622 return "\"invalid - cmd23 header block not in count\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200623 case TEST_PACKING_EXP_N_OVER_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200624 return "\"packing control - pack n\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200625 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200626 return "\"packing control - pack n followed by read\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200627 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
Lee Susman039ce092012-11-15 13:36:15 +0200628 return "\"packing control - pack n followed by flush\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200629 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200630 return "\"packing control - pack one followed by read\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200631 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200632 return "\"packing control - pack threshold\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200633 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
Lee Susman039ce092012-11-15 13:36:15 +0200634 return "\"packing control - no packing\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200635 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
Lee Susman039ce092012-11-15 13:36:15 +0200636 return "\"packing control - no packing, trigger requests\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200637 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200638 return "\"packing control - no pack, trigger-read-trigger\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200639 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200640 return "\"packing control- no pack, trigger-flush-trigger\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200641 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
Lee Susman039ce092012-11-15 13:36:15 +0200642 return "\"packing control - mix: pack -> no pack -> pack\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200643 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
Lee Susman039ce092012-11-15 13:36:15 +0200644 return "\"packing control - mix: no pack->pack->no pack\"";
Maya Erezddc55732012-10-17 09:51:01 +0200645 case TEST_WRITE_DISCARD_SANITIZE_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200646 return "\"write, discard, sanitize\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200647 case BKOPS_DELAYED_WORK_LEVEL_1:
Lee Susman039ce092012-11-15 13:36:15 +0200648 return "\"delayed work BKOPS level 1\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200649 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
Lee Susman039ce092012-11-15 13:36:15 +0200650 return "\"delayed work BKOPS level 1 with HPI\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200651 case BKOPS_CANCEL_DELAYED_WORK:
Lee Susman039ce092012-11-15 13:36:15 +0200652 return "\"cancel delayed BKOPS work\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200653 case BKOPS_URGENT_LEVEL_2:
Lee Susman039ce092012-11-15 13:36:15 +0200654 return "\"urgent BKOPS level 2\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200655 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200656 return "\"urgent BKOPS level 2, followed by a request\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200657 case BKOPS_URGENT_LEVEL_3:
Lee Susman039ce092012-11-15 13:36:15 +0200658 return "\"urgent BKOPS level 3\"";
Lee Susmanf18263a2012-10-24 14:14:37 +0200659 case TEST_LONG_SEQUENTIAL_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200660 return "\"long sequential read\"";
Lee Susmana35ae6e2012-10-25 16:06:07 +0200661 case TEST_LONG_SEQUENTIAL_WRITE:
Lee Susman039ce092012-11-15 13:36:15 +0200662 return "\"long sequential write\"";
Lee Susmanb09c0412012-12-19 14:28:52 +0200663 case TEST_NEW_REQ_NOTIFICATION:
664 return "\"new request notification test\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200665 default:
Lee Susman039ce092012-11-15 13:36:15 +0200666 return " Unknown testcase";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200667 }
668
669 return NULL;
670}
671
672/*
673 * Compare individual testcase's statistics to the expected statistics:
674 * Compare stop reason and number of packing events
675 */
676static int check_wr_packing_statistics(struct test_data *td)
677{
678 struct mmc_wr_pack_stats *mmc_packed_stats;
679 struct mmc_queue *mq = td->req_q->queuedata;
680 int max_packed_reqs = mq->card->ext_csd.max_packed_writes;
681 int i;
682 struct mmc_card *card = mq->card;
683 struct mmc_wr_pack_stats expected_stats;
684 int *stop_reason;
685 int ret = 0;
686
687 if (!mq) {
688 test_pr_err("%s: NULL mq", __func__);
689 return -EINVAL;
690 }
691
692 expected_stats = mbtd->exp_packed_stats;
693
694 mmc_packed_stats = mmc_blk_get_packed_statistics(card);
695 if (!mmc_packed_stats) {
696 test_pr_err("%s: NULL mmc_packed_stats", __func__);
697 return -EINVAL;
698 }
699
700 if (!mmc_packed_stats->packing_events) {
701 test_pr_err("%s: NULL packing_events", __func__);
702 return -EINVAL;
703 }
704
705 spin_lock(&mmc_packed_stats->lock);
706
707 if (!mmc_packed_stats->enabled) {
708 test_pr_err("%s write packing statistics are not enabled",
709 __func__);
710 ret = -EINVAL;
711 goto exit_err;
712 }
713
714 stop_reason = mmc_packed_stats->pack_stop_reason;
715
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200716 for (i = 1; i <= max_packed_reqs; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200717 if (mmc_packed_stats->packing_events[i] !=
718 expected_stats.packing_events[i]) {
719 test_pr_err(
720 "%s: Wrong pack stats in index %d, got %d, expected %d",
721 __func__, i, mmc_packed_stats->packing_events[i],
722 expected_stats.packing_events[i]);
723 if (td->fs_wr_reqs_during_test)
724 goto cancel_round;
725 ret = -EINVAL;
726 goto exit_err;
727 }
728 }
729
730 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
731 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
732 test_pr_err(
733 "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
734 __func__, stop_reason[EXCEEDS_SEGMENTS],
735 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
736 if (td->fs_wr_reqs_during_test)
737 goto cancel_round;
738 ret = -EINVAL;
739 goto exit_err;
740 }
741
742 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
743 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
744 test_pr_err(
745 "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
746 __func__, stop_reason[EXCEEDS_SECTORS],
747 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
748 if (td->fs_wr_reqs_during_test)
749 goto cancel_round;
750 ret = -EINVAL;
751 goto exit_err;
752 }
753
754 if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
755 expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
756 test_pr_err(
757 "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
758 __func__, stop_reason[WRONG_DATA_DIR],
759 expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
760 if (td->fs_wr_reqs_during_test)
761 goto cancel_round;
762 ret = -EINVAL;
763 goto exit_err;
764 }
765
766 if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
767 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
768 test_pr_err(
769 "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
770 __func__, stop_reason[FLUSH_OR_DISCARD],
771 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
772 if (td->fs_wr_reqs_during_test)
773 goto cancel_round;
774 ret = -EINVAL;
775 goto exit_err;
776 }
777
778 if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
779 expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
780 test_pr_err(
781 "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
782 __func__, stop_reason[EMPTY_QUEUE],
783 expected_stats.pack_stop_reason[EMPTY_QUEUE]);
784 if (td->fs_wr_reqs_during_test)
785 goto cancel_round;
786 ret = -EINVAL;
787 goto exit_err;
788 }
789
790 if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
791 expected_stats.pack_stop_reason[REL_WRITE]) {
792 test_pr_err(
793 "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
794 __func__, stop_reason[REL_WRITE],
795 expected_stats.pack_stop_reason[REL_WRITE]);
796 if (td->fs_wr_reqs_during_test)
797 goto cancel_round;
798 ret = -EINVAL;
799 goto exit_err;
800 }
801
802exit_err:
803 spin_unlock(&mmc_packed_stats->lock);
804 if (ret && mmc_packed_stats->enabled)
805 print_mmc_packing_stats(card);
806 return ret;
807cancel_round:
808 spin_unlock(&mmc_packed_stats->lock);
809 test_iosched_set_ignore_round(true);
810 return 0;
811}
812
813/*
814 * Pseudo-randomly choose a seed based on the last seed, and update it in
815 * seed_number. then return seed_number (mod max_val), or min_val.
816 */
817static unsigned int pseudo_random_seed(unsigned int *seed_number,
818 unsigned int min_val,
819 unsigned int max_val)
820{
821 int ret = 0;
822
823 if (!seed_number)
824 return 0;
825
826 *seed_number = ((unsigned int)(((unsigned long)*seed_number *
827 (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
828 ret = (unsigned int)((*seed_number) % max_val);
829
830 return (ret > min_val ? ret : min_val);
831}
832
833/*
834 * Given a pseudo-random seed, find a pseudo-random num_of_bios.
835 * Make sure that num_of_bios is not larger than TEST_MAX_SECTOR_RANGE
836 */
837static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
838 unsigned int *num_of_bios)
839{
840 do {
841 *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
842 TEST_MAX_BIOS_PER_REQ);
843 if (!(*num_of_bios))
844 *num_of_bios = 1;
845 } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
846}
847
848/* Add a single read request to the given td's request queue */
849static int prepare_request_add_read(struct test_data *td)
850{
851 int ret;
852 int start_sec;
853
854 if (td)
855 start_sec = td->start_sector;
856 else {
857 test_pr_err("%s: NULL td", __func__);
858 return 0;
859 }
860
861 test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
862 td->wr_rd_next_req_id);
863
864 ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
865 TEST_PATTERN_5A, NULL);
866 if (ret) {
867 test_pr_err("%s: failed to add a read request", __func__);
868 return ret;
869 }
870
871 return 0;
872}
873
874/* Add a single flush request to the given td's request queue */
875static int prepare_request_add_flush(struct test_data *td)
876{
877 int ret;
878
879 if (!td) {
880 test_pr_err("%s: NULL td", __func__);
881 return 0;
882 }
883
884 test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
885 td->unique_next_req_id);
886 ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
887 0, 0, NULL);
888 if (ret) {
889 test_pr_err("%s: failed to add a flush request", __func__);
890 return ret;
891 }
892
893 return ret;
894}
895
896/*
897 * Add num_requets amount of write requests to the given td's request queue.
898 * If random test mode is chosen we pseudo-randomly choose the number of bios
899 * for each write request, otherwise add between 1 to 5 bio per request.
900 */
901static int prepare_request_add_write_reqs(struct test_data *td,
902 int num_requests, int is_err_expected,
903 int is_random)
904{
905 int i;
906 unsigned int start_sec;
907 int num_bios;
908 int ret = 0;
909 unsigned int *bio_seed = &mbtd->random_test_seed;
910
911 if (td)
912 start_sec = td->start_sector;
913 else {
914 test_pr_err("%s: NULL td", __func__);
915 return ret;
916 }
917
918 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
919 num_requests, td->wr_rd_next_req_id);
920
Lee Susmanf18263a2012-10-24 14:14:37 +0200921 for (i = 1 ; i <= num_requests ; i++) {
922 start_sec =
923 td->start_sector + sizeof(int) *
924 BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200925 if (is_random)
926 pseudo_rnd_num_of_bios(bio_seed, &num_bios);
927 else
928 /*
929 * For the non-random case, give num_bios a value
930 * between 1 and 5, to keep a small number of BIOs
931 */
932 num_bios = (i%5)+1;
933
934 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
935 start_sec, num_bios, TEST_PATTERN_5A, NULL);
936
937 if (ret) {
938 test_pr_err("%s: failed to add a write request",
939 __func__);
940 return ret;
941 }
942 }
943 return 0;
944}
945
946/*
947 * Prepare the write, read and flush requests for a generic packed commands
948 * testcase
949 */
950static int prepare_packed_requests(struct test_data *td, int is_err_expected,
951 int num_requests, int is_random)
952{
953 int ret = 0;
954 struct mmc_queue *mq;
955 int max_packed_reqs;
956 struct request_queue *req_q;
957
958 if (!td) {
959 pr_err("%s: NULL td", __func__);
960 return -EINVAL;
961 }
962
963 req_q = td->req_q;
964
965 if (!req_q) {
966 pr_err("%s: NULL request queue", __func__);
967 return -EINVAL;
968 }
969
970 mq = req_q->queuedata;
971 if (!mq) {
972 test_pr_err("%s: NULL mq", __func__);
973 return -EINVAL;
974 }
975
976 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
977
978 if (mbtd->random_test_seed <= 0) {
979 mbtd->random_test_seed =
980 (unsigned int)(get_jiffies_64() & 0xFFFF);
981 test_pr_info("%s: got seed from jiffies %d",
982 __func__, mbtd->random_test_seed);
983 }
984
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200985 ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
986 is_random);
987 if (ret)
988 return ret;
989
990 /* Avoid memory corruption in upcoming stats set */
991 if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
992 num_requests--;
993
994 memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
995 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
996 memset(mbtd->exp_packed_stats.packing_events, 0,
997 (max_packed_reqs + 1) * sizeof(u32));
998 if (num_requests <= max_packed_reqs)
999 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1000
1001 switch (td->test_info.testcase) {
1002 case TEST_STOP_DUE_TO_FLUSH:
1003 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1004 ret = prepare_request_add_flush(td);
1005 if (ret)
1006 return ret;
1007
1008 mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
1009 break;
1010 case TEST_STOP_DUE_TO_READ:
1011 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1012 ret = prepare_request_add_read(td);
1013 if (ret)
1014 return ret;
1015
1016 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1017 break;
1018 case TEST_STOP_DUE_TO_THRESHOLD:
1019 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1020 mbtd->exp_packed_stats.packing_events[1] = 1;
1021 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
1022 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1023 break;
1024 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1025 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1026 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
1027 break;
1028 default:
1029 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1030 }
1031 mbtd->num_requests = num_requests;
1032
1033 return 0;
1034}
1035
/*
 * Prepare the write, read and flush requests for the packing control
 * testcases, and fill mbtd->exp_packed_stats with the statistics each
 * testcase is expected to produce.
 * Returns 0 on success or a negative error from a prepare helper.
 */
static int prepare_packed_control_tests_requests(struct test_data *td,
			int is_err_expected, int num_requests, int is_random)
{
	int ret = 0;
	struct mmc_queue *mq;
	int max_packed_reqs;
	/* saved so num_requests can be temporarily overridden below */
	int temp_num_req = num_requests;
	struct request_queue *req_q;
	/* number of write reqs after which packing should kick in */
	int test_packed_trigger;
	/* reqs expected to actually be packed (those past the trigger) */
	int num_packed_reqs;

	if (!td) {
		test_pr_err("%s: NULL td\n", __func__);
		return -EINVAL;
	}

	req_q = td->req_q;

	if (!req_q) {
		test_pr_err("%s: NULL request queue\n", __func__);
		return -EINVAL;
	}

	mq = req_q->queuedata;
	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
	num_packed_reqs = num_requests - test_packed_trigger;

	/* no user-provided seed; derive one from the current jiffies */
	if (mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			__func__, mbtd->random_test_seed);
	}

	/*
	 * This testcase first issues trigger-1 reqs (not packed); the full
	 * count is restored after the first batch is added.
	 */
	if (td->test_info.testcase ==
			TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
		temp_num_req = num_requests;
		num_requests = test_packed_trigger - 1;
	}

	/* Verify that the packing is disabled before starting the test */
	mq->wr_packing_enabled = false;
	mq->num_of_potential_packed_wr_reqs = 0;

	/* this testcase instead starts with packing already enabled */
	if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
		mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
		mq->wr_packing_enabled = true;
		num_requests = test_packed_trigger + 2;
	}

	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
					     is_random);
	if (ret)
		goto exit;

	if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
		num_requests = temp_num_req;

	/* reset expectations before filling them per testcase */
	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
	memset(mbtd->exp_packed_stats.packing_events, 0,
		(max_packed_reqs + 1) * sizeof(u32));

	/* per-testcase follow-up requests and expected statistics */
	switch (td->test_info.testcase) {
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_packed_reqs,
					     is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
					    is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
					     is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
					    is_err_expected, is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
					    is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
		mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		break;
	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
					    is_err_expected, is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
					     is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
		/* no extra requests and no packing expected */
		break;
	default:
		BUG_ON(num_packed_reqs < 0);
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
	}
	mbtd->num_requests = num_requests;

exit:
	return ret;
}
1212
1213/*
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001214 * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
1215 * In this testcase we have mixed error expectations from different
1216 * write requests, hence the special prepare function.
1217 */
1218static int prepare_partial_followed_by_abort(struct test_data *td,
1219 int num_requests)
1220{
1221 int i, start_address;
1222 int is_err_expected = 0;
1223 int ret = 0;
1224 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1225 int max_packed_reqs;
1226
1227 if (!mq) {
1228 test_pr_err("%s: NULL mq", __func__);
1229 return -EINVAL;
1230 }
1231
1232 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1233
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001234 for (i = 1; i <= num_requests; i++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001235 if (i > (num_requests / 2))
1236 is_err_expected = 1;
1237
Lee Susmanf18263a2012-10-24 14:14:37 +02001238 start_address = td->start_sector +
1239 sizeof(int) * BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001240 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001241 start_address, (i % 5) + 1, TEST_PATTERN_5A,
1242 NULL);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001243 if (ret) {
1244 test_pr_err("%s: failed to add a write request",
1245 __func__);
1246 return ret;
1247 }
1248 }
1249
1250 memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
1251 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1252 memset(mbtd->exp_packed_stats.packing_events, 0,
1253 (max_packed_reqs + 1) * sizeof(u32));
1254 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1255 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1256
1257 mbtd->num_requests = num_requests;
1258
1259 return ret;
1260}
1261
/*
 * Get number of write requests for current testcase. If random test mode was
 * chosen, pseudo-randomly choose the number of requests, otherwise set to
 * two less than the packing threshold. The result is then adjusted per
 * testcase/test group as annotated below.
 */
static int get_num_requests(struct test_data *td)
{
	/*
	 * NOTE(review): declared int* while random_test_seed is used as
	 * unsigned int elsewhere in this file — confirm intended signedness.
	 */
	int *seed = &mbtd->random_test_seed;
	struct request_queue *req_q;
	struct mmc_queue *mq;
	int max_num_requests;
	int num_requests;
	int min_num_requests = 2;
	int is_random = mbtd->is_random;
	int max_for_double;
	int test_packed_trigger;

	req_q = test_iosched_get_req_queue();
	if (req_q)
		mq = req_q->queuedata;
	else {
		test_pr_err("%s: NULL request queue", __func__);
		/* NOTE(review): returns 0 here but -EINVAL on NULL mq below */
		return 0;
	}

	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	max_num_requests = mq->card->ext_csd.max_packed_writes;
	/* default: two less than the maximum number of packed writes */
	num_requests = max_num_requests - 2;
	test_packed_trigger = mq->num_wr_reqs_to_start_packing;

	/*
	 * Here max_for_double is intended for packed control testcases
	 * in which we issue many write requests. It's purpose is to prevent
	 * exceeding max number of req_queue requests.
	 */
	max_for_double = max_num_requests - 10;

	if (td->test_info.testcase ==
				TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
		/* Don't expect packing, so issue up to trigger-1 reqs */
		num_requests = test_packed_trigger - 1;

	if (is_random) {
		if (td->test_info.testcase ==
		    TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
			/*
			 * Here we don't want num_requests to be less than 1
			 * as a consequence of division by 2.
			 */
			min_num_requests = 3;

		if (td->test_info.testcase ==
				TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
			/* Don't expect packing, so issue up to trigger reqs */
			max_num_requests = test_packed_trigger;

		num_requests = pseudo_random_seed(seed, min_num_requests,
						  max_num_requests - 1);
	}

	if (td->test_info.testcase ==
				TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
		num_requests -= test_packed_trigger;

	/* cap so the doubled request count still fits the request queue */
	if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
		num_requests =
		num_requests > max_for_double ? max_for_double : num_requests;

	/* packing control tests add the trigger reqs needed to enable packing */
	if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
		num_requests += test_packed_trigger;

	if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
		num_requests = test_packed_trigger;

	return num_requests;
}
1342
Lee Susman70160bb2013-01-06 10:57:30 +02001343static int prepare_long_read_test_requests(struct test_data *td)
Lee Susmanf18263a2012-10-24 14:14:37 +02001344{
1345
1346 int ret;
1347 int start_sec;
1348 int j;
Lee Susmanf18263a2012-10-24 14:14:37 +02001349
1350 if (td)
1351 start_sec = td->start_sector;
1352 else {
1353 test_pr_err("%s: NULL td\n", __func__);
1354 return -EINVAL;
1355 }
1356
Lee Susman70160bb2013-01-06 10:57:30 +02001357 test_pr_info("%s: Adding %d read requests, first req_id=%d", __func__,
1358 LONG_READ_TEST_ACTUAL_NUM_REQS, td->wr_rd_next_req_id);
Lee Susmanf18263a2012-10-24 14:14:37 +02001359
Lee Susman70160bb2013-01-06 10:57:30 +02001360 for (j = 0; j < LONG_READ_TEST_ACTUAL_NUM_REQS; j++) {
Lee Susmanf18263a2012-10-24 14:14:37 +02001361
Lee Susman70160bb2013-01-06 10:57:30 +02001362 ret = test_iosched_add_wr_rd_test_req(0, READ,
Lee Susmanf18263a2012-10-24 14:14:37 +02001363 start_sec,
1364 TEST_MAX_BIOS_PER_REQ,
1365 TEST_NO_PATTERN, NULL);
1366 if (ret) {
Lee Susman70160bb2013-01-06 10:57:30 +02001367 test_pr_err("%s: failed to add a read request, err = %d"
1368 , __func__, ret);
Lee Susmanf18263a2012-10-24 14:14:37 +02001369 return ret;
1370 }
1371
1372 start_sec +=
1373 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE);
1374 }
1375
1376 return 0;
1377}
1378
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001379/*
1380 * An implementation for the prepare_test_fn pointer in the test_info
1381 * data structure. According to the testcase we add the right number of requests
1382 * and decide if an error is expected or not.
1383 */
1384static int prepare_test(struct test_data *td)
1385{
1386 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1387 int max_num_requests;
1388 int num_requests = 0;
1389 int ret = 0;
1390 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001391 int test_packed_trigger = mq->num_wr_reqs_to_start_packing;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001392
1393 if (!mq) {
1394 test_pr_err("%s: NULL mq", __func__);
1395 return -EINVAL;
1396 }
1397
1398 max_num_requests = mq->card->ext_csd.max_packed_writes;
1399
1400 if (is_random && mbtd->random_test_seed == 0) {
1401 mbtd->random_test_seed =
1402 (unsigned int)(get_jiffies_64() & 0xFFFF);
1403 test_pr_info("%s: got seed from jiffies %d",
1404 __func__, mbtd->random_test_seed);
1405 }
1406
1407 num_requests = get_num_requests(td);
1408
1409 if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
1410 mq->packed_test_fn =
1411 test_invalid_packed_cmd;
1412
1413 if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
1414 mq->err_check_fn = test_err_check;
1415
1416 switch (td->test_info.testcase) {
1417 case TEST_STOP_DUE_TO_FLUSH:
1418 case TEST_STOP_DUE_TO_READ:
1419 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
1420 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
1421 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
1422 case TEST_CMD23_PACKED_BIT_UNSET:
1423 ret = prepare_packed_requests(td, 0, num_requests, is_random);
1424 break;
1425 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1426 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1427 ret = prepare_packed_requests(td, 0, max_num_requests - 1,
1428 is_random);
1429 break;
1430 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
1431 ret = prepare_partial_followed_by_abort(td, num_requests);
1432 break;
1433 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1434 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1435 ret = prepare_packed_requests(td, 0, max_num_requests,
1436 is_random);
1437 break;
1438 case TEST_STOP_DUE_TO_THRESHOLD:
1439 ret = prepare_packed_requests(td, 0, max_num_requests + 1,
1440 is_random);
1441 break;
1442 case TEST_RET_ABORT:
1443 case TEST_RET_RETRY:
1444 case TEST_RET_CMD_ERR:
1445 case TEST_RET_DATA_ERR:
1446 case TEST_HDR_INVALID_VERSION:
1447 case TEST_HDR_WRONG_WRITE_CODE:
1448 case TEST_HDR_INVALID_RW_CODE:
1449 case TEST_HDR_DIFFERENT_ADDRESSES:
1450 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
1451 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
1452 case TEST_CMD23_MAX_PACKED_WRITES:
1453 case TEST_CMD23_ZERO_PACKED_WRITES:
1454 case TEST_CMD23_REL_WR_BIT_SET:
1455 case TEST_CMD23_BITS_16TO29_SET:
1456 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
1457 case TEST_HDR_CMD23_PACKED_BIT_SET:
1458 ret = prepare_packed_requests(td, 1, num_requests, is_random);
1459 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001460 case TEST_PACKING_EXP_N_OVER_TRIGGER:
1461 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
1462 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
1463 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
1464 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
1465 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
1466 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1467 is_random);
1468 break;
1469 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
1470 ret = prepare_packed_control_tests_requests(td, 0,
1471 max_num_requests, is_random);
1472 break;
1473 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
1474 ret = prepare_packed_control_tests_requests(td, 0,
1475 test_packed_trigger + 1,
1476 is_random);
1477 break;
1478 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
1479 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1480 is_random);
1481 break;
1482 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
1483 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
1484 ret = prepare_packed_control_tests_requests(td, 0,
1485 test_packed_trigger, is_random);
1486 break;
Lee Susmana35ae6e2012-10-25 16:06:07 +02001487 case TEST_LONG_SEQUENTIAL_WRITE:
Lee Susmanf18263a2012-10-24 14:14:37 +02001488 case TEST_LONG_SEQUENTIAL_READ:
Lee Susman70160bb2013-01-06 10:57:30 +02001489 ret = prepare_long_read_test_requests(td);
Lee Susmanf18263a2012-10-24 14:14:37 +02001490 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001491 default:
1492 test_pr_info("%s: Invalid test case...", __func__);
Lee Susmanf18263a2012-10-24 14:14:37 +02001493 ret = -EINVAL;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001494 }
1495
1496 return ret;
1497}
1498
Maya Ereza12d1d22013-01-10 23:35:42 +02001499static int run_packed_test(struct test_data *td)
1500{
1501 struct mmc_queue *mq;
1502 struct request_queue *req_q;
1503
1504 if (!td) {
1505 pr_err("%s: NULL td", __func__);
1506 return -EINVAL;
1507 }
1508
1509 req_q = td->req_q;
1510
1511 if (!req_q) {
1512 pr_err("%s: NULL request queue", __func__);
1513 return -EINVAL;
1514 }
1515
1516 mq = req_q->queuedata;
1517 if (!mq) {
1518 test_pr_err("%s: NULL mq", __func__);
1519 return -EINVAL;
1520 }
1521 mmc_blk_init_packed_statistics(mq->card);
1522
1523 if (td->test_info.testcase != TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
1524 /*
1525 * Verify that the packing is disabled before starting the
1526 * test
1527 */
1528 mq->wr_packing_enabled = false;
1529 mq->num_of_potential_packed_wr_reqs = 0;
1530 }
1531
1532 __blk_run_queue(td->req_q);
1533
1534 return 0;
1535}
1536
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001537/*
1538 * An implementation for the post_test_fn in the test_info data structure.
1539 * In our case we just reset the function pointers in the mmc_queue in order for
1540 * the FS to be able to dispatch it's requests correctly after the test is
1541 * finished.
1542 */
1543static int post_test(struct test_data *td)
1544{
1545 struct mmc_queue *mq;
1546
1547 if (!td)
1548 return -EINVAL;
1549
1550 mq = td->req_q->queuedata;
1551
1552 if (!mq) {
1553 test_pr_err("%s: NULL mq", __func__);
1554 return -EINVAL;
1555 }
1556
1557 mq->packed_test_fn = NULL;
1558 mq->err_check_fn = NULL;
1559
1560 return 0;
1561}
1562
1563/*
1564 * This function checks, based on the current test's test_group, that the
1565 * packed commands capability and control are set right. In addition, we check
1566 * if the card supports the packed command feature.
1567 */
1568static int validate_packed_commands_settings(void)
1569{
1570 struct request_queue *req_q;
1571 struct mmc_queue *mq;
1572 int max_num_requests;
1573 struct mmc_host *host;
1574
1575 req_q = test_iosched_get_req_queue();
1576 if (!req_q) {
1577 test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
1578 test_iosched_set_test_result(TEST_FAILED);
1579 return -EINVAL;
1580 }
1581
1582 mq = req_q->queuedata;
1583 if (!mq) {
1584 test_pr_err("%s: NULL mq", __func__);
1585 return -EINVAL;
1586 }
1587
1588 max_num_requests = mq->card->ext_csd.max_packed_writes;
1589 host = mq->card->host;
1590
1591 if (!(host->caps2 && MMC_CAP2_PACKED_WR)) {
1592 test_pr_err("%s: Packed Write capability disabled, exit test",
1593 __func__);
1594 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1595 return -EINVAL;
1596 }
1597
1598 if (max_num_requests == 0) {
1599 test_pr_err(
1600 "%s: no write packing support, ext_csd.max_packed_writes=%d",
1601 __func__, mq->card->ext_csd.max_packed_writes);
1602 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1603 return -EINVAL;
1604 }
1605
1606 test_pr_info("%s: max number of packed requests supported is %d ",
1607 __func__, max_num_requests);
1608
1609 switch (mbtd->test_group) {
1610 case TEST_SEND_WRITE_PACKING_GROUP:
1611 case TEST_ERR_CHECK_GROUP:
1612 case TEST_SEND_INVALID_GROUP:
1613 /* disable the packing control */
1614 host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
1615 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001616 case TEST_PACKING_CONTROL_GROUP:
1617 host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
1618 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001619 default:
1620 break;
1621 }
1622
1623 return 0;
1624}
1625
Maya Erezddc55732012-10-17 09:51:01 +02001626static void pseudo_rnd_sector_and_size(unsigned int *seed,
1627 unsigned int min_start_sector,
1628 unsigned int *start_sector,
1629 unsigned int *num_of_bios)
1630{
1631 unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
1632 do {
1633 *start_sector = pseudo_random_seed(seed,
1634 1, max_sec);
1635 *num_of_bios = pseudo_random_seed(seed,
1636 1, TEST_MAX_BIOS_PER_REQ);
1637 if (!(*num_of_bios))
1638 *num_of_bios = 1;
1639 } while ((*start_sector < min_start_sector) ||
1640 (*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
1641}
1642
/* sanitize test functions */

/*
 * Fill the request queue with pseudo-randomly placed DISCARD requests
 * (BLKDEV_MAX_RQ - 10 of them) and terminate with a single SANITIZE
 * request. Always returns 0; the add helpers' return values are not
 * checked here.
 */
static int prepare_write_discard_sanitize_read(struct test_data *td)
{
	unsigned int start_sector;
	unsigned int num_of_bios = 0;
	/*
	 * NOTE(review): static, so this count accumulates across repeated
	 * test runs for the module's lifetime — confirm the logged total
	 * is meant to be cumulative rather than per-run.
	 */
	static unsigned int total_bios;
	unsigned int *num_bios_seed;
	int i = 0;

	/* no user-provided seed; derive one from the current jiffies */
	if (mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			__func__, mbtd->random_test_seed);
	}
	num_bios_seed = &mbtd->random_test_seed;

	do {
		/* pick a start sector and size inside the test range */
		pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
					   &start_sector, &num_of_bios);

		/* DISCARD */
		total_bios += num_of_bios;
		test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
				__func__, td->unique_next_req_id, start_sector,
				num_of_bios);
		test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
				start_sector, BIO_TO_SECTOR(num_of_bios),
				NULL);

	} while (++i < (BLKDEV_MAX_RQ-10));

	test_pr_info("%s: total discard bios = %d", __func__, total_bios);

	test_pr_info("%s: add sanitize req", __func__);
	test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);

	return 0;
}
1682
Yaniv Gardie9214c82012-10-18 13:58:18 +02001683/*
1684 * Post test operations for BKOPs test
1685 * Disable the BKOPs statistics and clear the feature flags
1686 */
1687static int bkops_post_test(struct test_data *td)
1688{
1689 struct request_queue *q = td->req_q;
1690 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1691 struct mmc_card *card = mq->card;
1692
1693 mmc_card_clr_doing_bkops(mq->card);
1694 card->ext_csd.raw_bkops_status = 0;
1695
1696 spin_lock(&card->bkops_info.bkops_stats.lock);
1697 card->bkops_info.bkops_stats.enabled = false;
1698 spin_unlock(&card->bkops_info.bkops_stats.lock);
1699
1700 return 0;
1701}
1702
/*
 * Verify the BKOPS statistics gathered during the round against the
 * expectations of the testcase that just ran.  Returns 0 on success
 * (or when the round is deliberately ignored), -EINVAL otherwise.
 */
static int check_bkops_result(struct test_data *td)
{
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		goto fail;

	bkops_stat = &card->bkops_info.bkops_stats;

	test_pr_info("%s: Test results: bkops:(%d,%d,%d) hpi:%d, suspend:%d",
			__func__,
			bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX],
			bkops_stat->hpi,
			bkops_stat->suspend);

	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* expect one level-1 BKOPS start and one suspend, no HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 1) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
		break;
	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* expect one level-1 BKOPS start that was cut short by HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 1))
			goto exit;
		/* this might happen due to timing issues */
		else if
			((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
			(bkops_stat->suspend == 0) &&
			(bkops_stat->hpi == 0))
			goto ignore;
		else
			goto fail;
		break;
	case BKOPS_CANCEL_DELAYED_WORK:
		/* delayed work was cancelled: expect no BKOPS activity at all */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 0) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* urgent level 2: exactly one level-2 start, no HPI/suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_3:
		/* urgent level 3: exactly one level-3 start, no HPI/suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	default:
		return -EINVAL;
	}

exit:
	return 0;
ignore:
	/* result inconclusive (timing); do not count this round */
	test_iosched_set_ignore_round(true);
	return 0;
fail:
	/* FS write traffic interfered with the measurement: skip the round */
	if (td->fs_wr_reqs_during_test) {
		test_pr_info("%s: wr reqs during test, cancel the round",
				__func__);
		test_iosched_set_ignore_round(true);
		return 0;
	}

	test_pr_info("%s: BKOPs statistics are not as expected, test failed",
			__func__);
	return -EINVAL;
}
1794
1795static void bkops_end_io_final_fn(struct request *rq, int err)
1796{
1797 struct test_request *test_rq =
1798 (struct test_request *)rq->elv.priv[0];
1799 BUG_ON(!test_rq);
1800
1801 test_rq->req_completed = 1;
1802 test_rq->req_result = err;
1803
1804 test_pr_info("%s: request %d completed, err=%d",
1805 __func__, test_rq->req_id, err);
1806
1807 mbtd->bkops_stage = BKOPS_STAGE_4;
1808 wake_up(&mbtd->bkops_wait_q);
1809}
1810
1811static void bkops_end_io_fn(struct request *rq, int err)
1812{
1813 struct test_request *test_rq =
1814 (struct test_request *)rq->elv.priv[0];
1815 BUG_ON(!test_rq);
1816
1817 test_rq->req_completed = 1;
1818 test_rq->req_result = err;
1819
1820 test_pr_info("%s: request %d completed, err=%d",
1821 __func__, test_rq->req_id, err);
1822 mbtd->bkops_stage = BKOPS_STAGE_2;
1823 wake_up(&mbtd->bkops_wait_q);
1824
1825}
1826
1827static int prepare_bkops(struct test_data *td)
1828{
1829 int ret = 0;
1830 struct request_queue *q = td->req_q;
1831 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1832 struct mmc_card *card = mq->card;
1833 struct mmc_bkops_stats *bkops_stat;
1834
1835 if (!card)
1836 return -EINVAL;
1837
1838 bkops_stat = &card->bkops_info.bkops_stats;
1839
1840 if (!card->ext_csd.bkops_en) {
1841 test_pr_err("%s: BKOPS is not enabled by card or host)",
1842 __func__);
1843 return -ENOTSUPP;
1844 }
1845 if (mmc_card_doing_bkops(card)) {
1846 test_pr_err("%s: BKOPS in progress, try later", __func__);
1847 return -EAGAIN;
1848 }
1849
1850 mmc_blk_init_bkops_statistics(card);
1851
1852 if ((mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2) ||
1853 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2_TWO_REQS) ||
1854 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_3))
1855 mq->err_check_fn = test_err_check;
1856 mbtd->err_check_counter = 0;
1857
1858 return ret;
1859}
1860
/*
 * run_bkops() - Execute the BKOPS testcase selected in
 * mbtd->test_info.testcase.
 *
 * Each case fakes a BKOPS need by writing the desired level into
 * ext_csd.raw_bkops_status (with ignore_card_bkops_status set so the
 * real card status is not consulted), drives the request queue, and
 * synchronizes with the completion callbacks (bkops_end_io_fn /
 * bkops_end_io_final_fn) through mbtd->bkops_stage and bkops_wait_q.
 *
 * Returns 0 on success, -EINVAL when adding a request fails or the
 * testcase is unknown.
 */
static int run_bkops(struct test_data *td)
{
	int ret = 0;
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		return -EINVAL;

	bkops_stat = &card->bkops_info.bkops_stats;

	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/*
		 * Fake a non-urgent (level 1) status and enough changed
		 * sectors to queue the delayed BKOPS work, then idle.
		 */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		/* this long sleep makes sure the host starts bkops and
		   also, gets into suspend */
		msleep(10000);

		bkops_stat->ignore_card_bkops_status = false;
		card->ext_csd.raw_bkops_status = 0;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/*
		 * As level 1, but once the delayed-work delay elapses a
		 * write is issued so the driver interrupts BKOPS with HPI.
		 */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		msleep(card->bkops_info.delay_ms);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		/* bkops_end_io_final_fn advances the stage to 4 */
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_CANCEL_DELAYED_WORK:
		/*
		 * Queue the delayed work, then immediately add a write so
		 * the pending delayed BKOPS work is cancelled.
		 */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
		/*
		 * Fake an urgent status (level 2 or 3), let one write
		 * trigger BKOPS (stage 2), then clear the status and issue
		 * a final write to finish the round (stage 4).
		 */
		bkops_stat->ignore_card_bkops_status = true;
		if (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2)
			card->ext_csd.raw_bkops_status = 2;
		else
			card->ext_csd.raw_bkops_status = 3;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/*
		 * Same as urgent level 2 but with two queued writes and
		 * write packing disabled, so the requests go out separately.
		 */
		mq->wr_packing_enabled = false;
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 2;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				NULL);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();

		break;
	default:
		test_pr_err("%s: wrong testcase: %d", __func__,
			    mbtd->test_info.testcase);
		ret = -EINVAL;
	}
	return ret;
}
2062
Lee Susmanb09c0412012-12-19 14:28:52 +02002063/*
2064 * new_req_post_test() - Do post test operations for
2065 * new_req_notification test: disable the statistics and clear
2066 * the feature flags.
2067 * @td The test_data for the new_req test that has
2068 * ended.
2069 */
2070static int new_req_post_test(struct test_data *td)
2071{
2072 struct mmc_queue *mq;
2073
2074 if (!td || !td->req_q)
2075 goto exit;
2076
2077 mq = (struct mmc_queue *)td->req_q->queuedata;
2078
2079 if (!mq || !mq->card)
2080 goto exit;
2081
Lee Susmanb09c0412012-12-19 14:28:52 +02002082 test_pr_info("Completed %d requests",
2083 mbtd->completed_req_count);
2084
2085exit:
2086 return 0;
2087}
2088
2089/*
2090 * check_new_req_result() - Print out the number of completed
2091 * requests. Assigned to the check_test_result_fn pointer,
2092 * therefore the name.
2093 * @td The test_data for the new_req test that has
2094 * ended.
2095 */
2096static int check_new_req_result(struct test_data *td)
2097{
2098 test_pr_info("%s: Test results: Completed %d requests",
2099 __func__, mbtd->completed_req_count);
2100 return 0;
2101}
2102
/*
 * new_req_free_end_io_fn() - Remove request from queuelist and
 * free request's allocated memory. Used as a call-back
 * assigned to end_io member in request struct.
 * @rq	The request to be freed
 * @err	Unused
 */
static void new_req_free_end_io_fn(struct request *rq, int err)
{
	struct test_request *test_rq =
		(struct test_request *)rq->elv.priv[0];
	struct test_data *ptd = test_get_test_data();

	BUG_ON(!test_rq);

	/* unlink under the test-data lock before tearing the request down */
	spin_lock_irq(&ptd->lock);
	list_del_init(&test_rq->queuelist);
	ptd->dispatched_count--;
	spin_unlock_irq(&ptd->lock);

	__blk_put_request(ptd->req_q, test_rq->rq);
	kfree(test_rq->bios_buffer);
	kfree(test_rq);
	/*
	 * NOTE(review): completed_req_count is incremented outside
	 * ptd->lock -- safe only if completions are serialized; confirm.
	 * Also, spin_lock_irq() above assumes this callback never runs
	 * with interrupts already disabled; otherwise the irqsave
	 * variant would be required.
	 */
	mbtd->completed_req_count++;
}
2128
2129static int prepare_new_req(struct test_data *td)
2130{
2131 struct request_queue *q = td->req_q;
2132 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
2133
2134 mmc_blk_init_packed_statistics(mq->card);
Lee Susmanb09c0412012-12-19 14:28:52 +02002135 mbtd->completed_req_count = 0;
2136
2137 return 0;
2138}
2139
Lee Susmand50afb52013-01-09 14:48:47 +02002140static int run_new_req(struct test_data *ptd)
Lee Susmanb09c0412012-12-19 14:28:52 +02002141{
2142 int ret = 0;
2143 int i;
2144 unsigned int requests_count = 2;
2145 unsigned int bio_num;
2146 struct test_request *test_rq = NULL;
2147
2148 while (1) {
2149 for (i = 0; i < requests_count; i++) {
2150 bio_num = TEST_MAX_BIOS_PER_REQ;
2151 test_rq = test_iosched_create_test_req(0, READ,
2152 ptd->start_sector,
2153 bio_num, TEST_PATTERN_5A,
2154 new_req_free_end_io_fn);
2155 if (test_rq) {
2156 spin_lock_irq(ptd->req_q->queue_lock);
2157 list_add_tail(&test_rq->queuelist,
2158 &ptd->test_queue);
2159 ptd->test_count++;
2160 spin_unlock_irq(ptd->req_q->queue_lock);
2161 } else {
2162 test_pr_err("%s: failed to create read request",
2163 __func__);
2164 ret = -ENODEV;
2165 break;
2166 }
2167 }
2168
2169 __blk_run_queue(ptd->req_q);
2170 /* wait while a mmc layer will send all requests in test_queue*/
2171 while (!list_empty(&ptd->test_queue))
2172 msleep(NEW_REQ_TEST_SLEEP_TIME);
2173
2174 /* test finish criteria */
2175 if (mbtd->completed_req_count > 1000) {
2176 if (ptd->dispatched_count)
2177 continue;
2178 else
2179 break;
2180 }
2181
2182 for (i = 0; i < requests_count; i++) {
2183 bio_num = NEW_REQ_TEST_NUM_BIOS;
2184 test_rq = test_iosched_create_test_req(0, READ,
2185 ptd->start_sector,
2186 bio_num, TEST_PATTERN_5A,
2187 new_req_free_end_io_fn);
2188 if (test_rq) {
2189 spin_lock_irq(ptd->req_q->queue_lock);
2190 list_add_tail(&test_rq->queuelist,
2191 &ptd->test_queue);
2192 ptd->test_count++;
2193 spin_unlock_irq(ptd->req_q->queue_lock);
2194 } else {
2195 test_pr_err("%s: failed to create read request",
2196 __func__);
2197 ret = -ENODEV;
2198 break;
2199 }
2200 }
2201 __blk_run_queue(ptd->req_q);
2202 }
2203
2204 test_iosched_mark_test_completion();
2205 test_pr_info("%s: EXIT: %d code", __func__, ret);
2206
2207 return ret;
2208}
2209
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002210static bool message_repeat;
2211static int test_open(struct inode *inode, struct file *file)
2212{
2213 file->private_data = inode->i_private;
2214 message_repeat = 1;
2215 return 0;
2216}
2217
2218/* send_packing TEST */
2219static ssize_t send_write_packing_test_write(struct file *file,
2220 const char __user *buf,
2221 size_t count,
2222 loff_t *ppos)
2223{
2224 int ret = 0;
2225 int i = 0;
2226 int number = -1;
2227 int j = 0;
2228
2229 test_pr_info("%s: -- send_write_packing TEST --", __func__);
2230
2231 sscanf(buf, "%d", &number);
2232
2233 if (number <= 0)
2234 number = 1;
2235
2236
2237 mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
2238
2239 if (validate_packed_commands_settings())
2240 return count;
2241
2242 if (mbtd->random_test_seed > 0)
2243 test_pr_info("%s: Test seed: %d", __func__,
2244 mbtd->random_test_seed);
2245
2246 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2247
2248 mbtd->test_info.data = mbtd;
2249 mbtd->test_info.prepare_test_fn = prepare_test;
Maya Ereza12d1d22013-01-10 23:35:42 +02002250 mbtd->test_info.run_test_fn = run_packed_test;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002251 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2252 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2253 mbtd->test_info.post_test_fn = post_test;
2254
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002255 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002256 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2257 test_pr_info("%s: ====================", __func__);
2258
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002259 for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
2260 j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002261
2262 mbtd->test_info.testcase = j;
2263 mbtd->is_random = RANDOM_TEST;
2264 ret = test_iosched_start_test(&mbtd->test_info);
2265 if (ret)
2266 break;
2267 /* Allow FS requests to be dispatched */
2268 msleep(1000);
2269 mbtd->test_info.testcase = j;
2270 mbtd->is_random = NON_RANDOM_TEST;
2271 ret = test_iosched_start_test(&mbtd->test_info);
2272 if (ret)
2273 break;
2274 /* Allow FS requests to be dispatched */
2275 msleep(1000);
2276 }
2277 }
2278
2279 test_pr_info("%s: Completed all the test cases.", __func__);
2280
2281 return count;
2282}
2283
/*
 * Debugfs read handler: prints the send_write_packing help text once
 * per open (message_repeat gates the second read so `cat` terminates).
 *
 * NOTE(review): memset()/snprintf() write directly into the __user
 * buffer instead of going through copy_to_user() /
 * simple_read_from_buffer(); this is not a valid way to access user
 * memory and should be fixed (pattern repeated in all *_read handlers
 * of this file).
 */
static ssize_t send_write_packing_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nsend_write_packing_test\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Pack due to FLUSH message\n"
		 "- Pack due to FLUSH after threshold writes\n"
		 "- Pack due to READ message\n"
		 "- Pack due to READ after threshold writes\n"
		 "- Pack due to empty queue\n"
		 "- Pack due to threshold writes\n"
		 "- Pack due to one over threshold writes\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2311
/* debugfs file operations for the send_write_packing test node */
const struct file_operations send_write_packing_test_ops = {
	.open = test_open,
	.write = send_write_packing_test_write,
	.read = send_write_packing_test_read,
};
2317
2318/* err_check TEST */
2319static ssize_t err_check_test_write(struct file *file,
2320 const char __user *buf,
2321 size_t count,
2322 loff_t *ppos)
2323{
2324 int ret = 0;
2325 int i = 0;
2326 int number = -1;
2327 int j = 0;
2328
2329 test_pr_info("%s: -- err_check TEST --", __func__);
2330
2331 sscanf(buf, "%d", &number);
2332
2333 if (number <= 0)
2334 number = 1;
2335
2336 mbtd->test_group = TEST_ERR_CHECK_GROUP;
2337
2338 if (validate_packed_commands_settings())
2339 return count;
2340
2341 if (mbtd->random_test_seed > 0)
2342 test_pr_info("%s: Test seed: %d", __func__,
2343 mbtd->random_test_seed);
2344
2345 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2346
2347 mbtd->test_info.data = mbtd;
2348 mbtd->test_info.prepare_test_fn = prepare_test;
Maya Ereza12d1d22013-01-10 23:35:42 +02002349 mbtd->test_info.run_test_fn = run_packed_test;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002350 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2351 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2352 mbtd->test_info.post_test_fn = post_test;
2353
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002354 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002355 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2356 test_pr_info("%s: ====================", __func__);
2357
2358 for (j = ERR_CHECK_MIN_TESTCASE;
2359 j <= ERR_CHECK_MAX_TESTCASE ; j++) {
2360 mbtd->test_info.testcase = j;
2361 mbtd->is_random = RANDOM_TEST;
2362 ret = test_iosched_start_test(&mbtd->test_info);
2363 if (ret)
2364 break;
2365 /* Allow FS requests to be dispatched */
2366 msleep(1000);
2367 mbtd->test_info.testcase = j;
2368 mbtd->is_random = NON_RANDOM_TEST;
2369 ret = test_iosched_start_test(&mbtd->test_info);
2370 if (ret)
2371 break;
2372 /* Allow FS requests to be dispatched */
2373 msleep(1000);
2374 }
2375 }
2376
2377 test_pr_info("%s: Completed all the test cases.", __func__);
2378
2379 return count;
2380}
2381
/*
 * Debugfs read handler: prints the err_check help text once per open
 * (message_repeat gates the second read so `cat` terminates).
 *
 * NOTE(review): writes directly into the __user buffer -- see note on
 * send_write_packing_test_read(); should use copy_to_user().
 */
static ssize_t err_check_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nerr_check_TEST\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Return ABORT\n"
		 "- Return PARTIAL followed by success\n"
		 "- Return PARTIAL followed by abort\n"
		 "- Return PARTIAL multiple times until success\n"
		 "- Return PARTIAL with fail index = threshold\n"
		 "- Return RETRY\n"
		 "- Return CMD_ERR\n"
		 "- Return DATA_ERR\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2410
/* debugfs file operations for the err_check test node */
const struct file_operations err_check_test_ops = {
	.open = test_open,
	.write = err_check_test_write,
	.read = err_check_test_read,
};
2416
2417/* send_invalid_packed TEST */
2418static ssize_t send_invalid_packed_test_write(struct file *file,
2419 const char __user *buf,
2420 size_t count,
2421 loff_t *ppos)
2422{
2423 int ret = 0;
2424 int i = 0;
2425 int number = -1;
2426 int j = 0;
2427 int num_of_failures = 0;
2428
2429 test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
2430
2431 sscanf(buf, "%d", &number);
2432
2433 if (number <= 0)
2434 number = 1;
2435
2436 mbtd->test_group = TEST_SEND_INVALID_GROUP;
2437
2438 if (validate_packed_commands_settings())
2439 return count;
2440
2441 if (mbtd->random_test_seed > 0)
2442 test_pr_info("%s: Test seed: %d", __func__,
2443 mbtd->random_test_seed);
2444
2445 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2446
2447 mbtd->test_info.data = mbtd;
2448 mbtd->test_info.prepare_test_fn = prepare_test;
Maya Ereza12d1d22013-01-10 23:35:42 +02002449 mbtd->test_info.run_test_fn = run_packed_test;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002450 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2451 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2452 mbtd->test_info.post_test_fn = post_test;
2453
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002454 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002455 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2456 test_pr_info("%s: ====================", __func__);
2457
2458 for (j = INVALID_CMD_MIN_TESTCASE;
2459 j <= INVALID_CMD_MAX_TESTCASE ; j++) {
2460
2461 mbtd->test_info.testcase = j;
2462 mbtd->is_random = RANDOM_TEST;
2463 ret = test_iosched_start_test(&mbtd->test_info);
2464 if (ret)
2465 num_of_failures++;
2466 /* Allow FS requests to be dispatched */
2467 msleep(1000);
2468
2469 mbtd->test_info.testcase = j;
2470 mbtd->is_random = NON_RANDOM_TEST;
2471 ret = test_iosched_start_test(&mbtd->test_info);
2472 if (ret)
2473 num_of_failures++;
2474 /* Allow FS requests to be dispatched */
2475 msleep(1000);
2476 }
2477 }
2478
2479 test_pr_info("%s: Completed all the test cases.", __func__);
2480
2481 if (num_of_failures > 0) {
2482 test_iosched_set_test_result(TEST_FAILED);
2483 test_pr_err(
2484 "There were %d failures during the test, TEST FAILED",
2485 num_of_failures);
2486 }
2487 return count;
2488}
2489
/*
 * Debugfs read handler: prints the send_invalid_packed help text once
 * per open (message_repeat gates the second read so `cat` terminates).
 *
 * NOTE(review): writes directly into the __user buffer -- see note on
 * send_write_packing_test_read(); should use copy_to_user().
 */
static ssize_t send_invalid_packed_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nsend_invalid_packed_TEST\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Send an invalid header version\n"
		 "- Send the wrong write code\n"
		 "- Send an invalid R/W code\n"
		 "- Send wrong start address in header\n"
		 "- Send header with block_count smaller than actual\n"
		 "- Send header with block_count larger than actual\n"
		 "- Send header CMD23 packed bit set\n"
		 "- Send CMD23 with block count over threshold\n"
		 "- Send CMD23 with block_count equals zero\n"
		 "- Send CMD23 packed bit unset\n"
		 "- Send CMD23 reliable write bit set\n"
		 "- Send CMD23 bits [16-29] set\n"
		 "- Send CMD23 header block not in block_count\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2523
/* debugfs file operations for the send_invalid_packed test node */
const struct file_operations send_invalid_packed_test_ops = {
	.open = test_open,
	.write = send_invalid_packed_test_write,
	.read = send_invalid_packed_test_read,
};
2529
/* packing_control TEST */
/*
 * Debugfs write handler for the write_packing_control test.  The
 * written value selects the number of cycles; each cycle runs every
 * packing-control testcase twice (random then non-random).  Unlike
 * the other groups, a failed run aborts the remaining cycles via
 * test_successful.
 *
 * NOTE(review): mq and mq->card are dereferenced before
 * validate_packed_commands_settings() and without any NULL check on
 * test_iosched_get_req_queue()->queuedata; if the test io-scheduler
 * queue is not set up this oopses -- confirm the intended ordering.
 */
static ssize_t write_packing_control_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;
	int j = 0;
	struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
	int max_num_requests = mq->card->ext_csd.max_packed_writes;
	int test_successful = 1;

	test_pr_info("%s: -- write_packing_control TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;

	test_pr_info("%s: max_num_requests = %d ", __func__,
			max_num_requests);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));
	mbtd->test_group = TEST_PACKING_CONTROL_GROUP;

	if (validate_packed_commands_settings())
		return count;

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.run_test_fn = run_packed_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;

	for (i = 0; i < number; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = PACKING_CONTROL_MIN_TESTCASE;
				j <= PACKING_CONTROL_MAX_TESTCASE; j++) {

			test_successful = 1;
			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret) {
				test_successful = 0;
				break;
			}
			/* Allow FS requests to be dispatched */
			msleep(1000);

			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret) {
				test_successful = 0;
				break;
			}
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}

		/* a failed testcase stops the remaining cycles too */
		if (!test_successful)
			break;
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
2603
/*
 * Debugfs read handler: prints the write_packing_control help text
 * once per open (message_repeat gates the second read).
 *
 * NOTE(review): writes directly into the __user buffer -- see note on
 * send_write_packing_test_read(); should use copy_to_user().
 */
static ssize_t write_packing_control_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nwrite_packing_control_test\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Packing expected - one over trigger\n"
		 "- Packing expected - N over trigger\n"
		 "- Packing expected - N over trigger followed by read\n"
		 "- Packing expected - N over trigger followed by flush\n"
		 "- Packing expected - threshold over trigger FB by flush\n"
		 "- Packing not expected - less than trigger\n"
		 "- Packing not expected - trigger requests\n"
		 "- Packing not expected - trigger, read, trigger\n"
		 "- Mixed state - packing -> no packing -> packing\n"
		 "- Mixed state - no packing -> packing -> no packing\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2634
/* debugfs file operations for the write_packing_control test node */
const struct file_operations write_packing_control_test_ops = {
	.open = test_open,
	.write = write_packing_control_test_write,
	.read = write_packing_control_test_read,
};
2640
Maya Erezddc55732012-10-17 09:51:01 +02002641static ssize_t write_discard_sanitize_test_write(struct file *file,
2642 const char __user *buf,
2643 size_t count,
2644 loff_t *ppos)
2645{
2646 int ret = 0;
2647 int i = 0;
2648 int number = -1;
2649
2650 sscanf(buf, "%d", &number);
2651 if (number <= 0)
2652 number = 1;
2653
2654 test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);
2655
2656 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2657
2658 mbtd->test_group = TEST_GENERAL_GROUP;
2659
2660 mbtd->test_info.data = mbtd;
2661 mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
2662 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2663 mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;
2664
2665 for (i = 0 ; i < number ; ++i) {
2666 test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
2667 test_pr_info("%s: ===================", __func__);
2668
2669 mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
2670 ret = test_iosched_start_test(&mbtd->test_info);
2671
2672 if (ret)
2673 break;
2674 }
2675
2676 return count;
2677}
2678
/* debugfs file operations for the write_discard_sanitize test node */
const struct file_operations write_discard_sanitize_test_ops = {
	.open = test_open,
	.write = write_discard_sanitize_test_write,
};
2683
Yaniv Gardie9214c82012-10-18 13:58:18 +02002684static ssize_t bkops_test_write(struct file *file,
2685 const char __user *buf,
2686 size_t count,
2687 loff_t *ppos)
2688{
2689 int ret = 0;
2690 int i = 0, j;
2691 int number = -1;
2692
2693 test_pr_info("%s: -- bkops_test TEST --", __func__);
2694
2695 sscanf(buf, "%d", &number);
2696
2697 if (number <= 0)
2698 number = 1;
2699
2700 mbtd->test_group = TEST_BKOPS_GROUP;
2701
2702 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2703
2704 mbtd->test_info.data = mbtd;
2705 mbtd->test_info.prepare_test_fn = prepare_bkops;
2706 mbtd->test_info.check_test_result_fn = check_bkops_result;
2707 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2708 mbtd->test_info.run_test_fn = run_bkops;
2709 mbtd->test_info.timeout_msec = BKOPS_TEST_TIMEOUT;
2710 mbtd->test_info.post_test_fn = bkops_post_test;
2711
2712 for (i = 0 ; i < number ; ++i) {
2713 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2714 test_pr_info("%s: ===================", __func__);
2715 for (j = BKOPS_MIN_TESTCASE ;
2716 j <= BKOPS_MAX_TESTCASE ; j++) {
2717 mbtd->test_info.testcase = j;
2718 ret = test_iosched_start_test(&mbtd->test_info);
2719 if (ret)
2720 break;
2721 }
2722 }
2723
2724 test_pr_info("%s: Completed all the test cases.", __func__);
2725
2726 return count;
2727}
2728
2729static ssize_t bkops_test_read(struct file *file,
2730 char __user *buffer,
2731 size_t count,
2732 loff_t *offset)
2733{
2734 memset((void *)buffer, 0, count);
2735
2736 snprintf(buffer, count,
2737 "\nbkops_test\n========================\n"
2738 "Description:\n"
2739 "This test simulates BKOPS status from card\n"
2740 "and verifies that:\n"
2741 " - Starting BKOPS delayed work, level 1\n"
2742 " - Starting BKOPS delayed work, level 1, with HPI\n"
2743 " - Cancel starting BKOPS delayed work, "
2744 " when a request is received\n"
2745 " - Starting BKOPS urgent, level 2,3\n"
2746 " - Starting BKOPS urgent with 2 requests\n");
2747 return strnlen(buffer, count);
2748}
2749
/* debugfs hooks for the BKOPS test node. */
const struct file_operations bkops_test_ops = {
	.open = test_open,
	.write = bkops_test_write,
	.read = bkops_test_read,
};
2755
Lee Susmanf18263a2012-10-24 14:14:37 +02002756static ssize_t long_sequential_read_test_write(struct file *file,
2757 const char __user *buf,
2758 size_t count,
2759 loff_t *ppos)
2760{
2761 int ret = 0;
2762 int i = 0;
2763 int number = -1;
Lee Susman70160bb2013-01-06 10:57:30 +02002764 unsigned long mtime, integer, fraction;
Lee Susmanf18263a2012-10-24 14:14:37 +02002765
2766 test_pr_info("%s: -- Long Sequential Read TEST --", __func__);
2767
2768 sscanf(buf, "%d", &number);
2769
2770 if (number <= 0)
2771 number = 1;
2772
2773 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2774 mbtd->test_group = TEST_GENERAL_GROUP;
2775
2776 mbtd->test_info.data = mbtd;
2777 mbtd->test_info.prepare_test_fn = prepare_test;
2778 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2779
2780 for (i = 0 ; i < number ; ++i) {
2781 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2782 test_pr_info("%s: ====================", __func__);
2783
2784 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_READ;
2785 mbtd->is_random = NON_RANDOM_TEST;
2786 ret = test_iosched_start_test(&mbtd->test_info);
2787 if (ret)
2788 break;
2789
2790 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2791
Lee Susman70160bb2013-01-06 10:57:30 +02002792 test_pr_info("%s: time is %lu msec, size is %u.%u MiB",
2793 __func__, mtime,
2794 LONG_TEST_SIZE_INTEGER(LONG_READ_NUM_BYTES),
2795 LONG_TEST_SIZE_FRACTION(LONG_READ_NUM_BYTES));
Lee Susmanf18263a2012-10-24 14:14:37 +02002796
2797 /* we first multiply in order not to lose precision */
2798 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2799 /* divide values to get a MiB/sec integer value with one
2800 digit of precision. Multiply by 10 for one digit precision
2801 */
Lee Susman70160bb2013-01-06 10:57:30 +02002802 fraction = integer = (LONG_READ_NUM_BYTES * 10) / mtime;
Lee Susmanf18263a2012-10-24 14:14:37 +02002803 integer /= 10;
2804 /* and calculate the MiB value fraction */
2805 fraction -= integer * 10;
2806
Lee Susman70160bb2013-01-06 10:57:30 +02002807 test_pr_info("%s: Throughput: %lu.%lu MiB/sec\n"
Lee Susmanf18263a2012-10-24 14:14:37 +02002808 , __func__, integer, fraction);
2809
2810 /* Allow FS requests to be dispatched */
2811 msleep(1000);
2812 }
2813
2814 return count;
2815}
2816
/*
 * debugfs read handler: prints a short description of the long
 * sequential read test, once per open (message_repeat guard), then
 * reports EOF.
 *
 * NOTE(review): writes into 'buffer' (a __user pointer) directly with
 * memset/snprintf instead of copy_to_user()/simple_read_from_buffer();
 * presumably tolerated by this debugfs setup — TODO confirm.
 */
static ssize_t long_sequential_read_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nlong_sequential_read_test\n"
		 "=========\n"
		 "Description:\n"
		 "This test runs the following scenarios\n"
		 "- Long Sequential Read Test: this test measures read "
		 "throughput at the driver level by sequentially reading many "
		 "large requests.\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else
		return 0;
}
2839
/* debugfs hooks for the long sequential read test node. */
const struct file_operations long_sequential_read_test_ops = {
	.open = test_open,
	.write = long_sequential_read_test_write,
	.read = long_sequential_read_test_read,
};
2845
Lee Susman70160bb2013-01-06 10:57:30 +02002846static void long_seq_write_free_end_io_fn(struct request *rq, int err)
2847{
2848 struct test_request *test_rq =
2849 (struct test_request *)rq->elv.priv[0];
2850 struct test_data *ptd = test_get_test_data();
2851
2852 BUG_ON(!test_rq);
2853
2854 spin_lock_irq(&ptd->lock);
2855 list_del_init(&test_rq->queuelist);
2856 ptd->dispatched_count--;
2857 __blk_put_request(ptd->req_q, test_rq->rq);
2858 spin_unlock_irq(&ptd->lock);
2859
2860 kfree(test_rq->bios_buffer);
2861 kfree(test_rq);
2862 mbtd->completed_req_count++;
2863
2864 check_test_completion();
2865}
2866
/*
 * Body of the long sequential write test: adds large write requests in
 * batches (capped so the 128-request pool is not exhausted), runs the
 * queue, and repeats until at least LONG_WRITE_TEST_MIN_NUM_REQS
 * requests have completed (counted by long_seq_write_free_end_io_fn).
 *
 * NOTE(review): if test_iosched_add_wr_rd_test_req() fails persistently
 * while no outstanding request ever completes, the outer do/while never
 * terminates — TODO confirm the framework guarantees progress here.
 */
static int run_long_seq_write(struct test_data *td)
{
	int ret = 0;
	int i;

	td->test_count = 0;
	mbtd->completed_req_count = 0;

	test_pr_info("%s: Adding at least %d write requests, first req_id=%d",
		     __func__, LONG_WRITE_TEST_MIN_NUM_REQS,
		     td->wr_rd_next_req_id);

	do {
		for (i = 0; i < TEST_MAX_REQUESTS; i++) {
			/*
			 * since our requests come from a pool containing 128
			 * requests, we don't want to exhaust this quantity,
			 * therefore we add up to TEST_MAX_REQUESTS (which
			 * includes a safety margin) and then call the mmc layer
			 * to fetch them
			 */
			if (td->test_count > TEST_MAX_REQUESTS)
				break;

			ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector, TEST_MAX_BIOS_PER_REQ,
				TEST_PATTERN_5A,
				long_seq_write_free_end_io_fn);
			if (ret) {
				test_pr_err("%s: failed to create write request"
					    , __func__);
				break;
			}
		}

		__blk_run_queue(td->req_q);

	} while (mbtd->completed_req_count < LONG_WRITE_TEST_MIN_NUM_REQS);

	test_pr_info("%s: completed %d requests", __func__,
		     mbtd->completed_req_count);

	return ret;
}
2911
Lee Susmana35ae6e2012-10-25 16:06:07 +02002912static ssize_t long_sequential_write_test_write(struct file *file,
2913 const char __user *buf,
2914 size_t count,
2915 loff_t *ppos)
2916{
2917 int ret = 0;
2918 int i = 0;
2919 int number = -1;
Lee Susman70160bb2013-01-06 10:57:30 +02002920 unsigned long mtime, integer, fraction, byte_count;
Lee Susmana35ae6e2012-10-25 16:06:07 +02002921
2922 test_pr_info("%s: -- Long Sequential Write TEST --", __func__);
2923
2924 sscanf(buf, "%d", &number);
2925
2926 if (number <= 0)
2927 number = 1;
2928
2929 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2930 mbtd->test_group = TEST_GENERAL_GROUP;
2931
2932 mbtd->test_info.data = mbtd;
Lee Susmana35ae6e2012-10-25 16:06:07 +02002933 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
Lee Susman70160bb2013-01-06 10:57:30 +02002934 mbtd->test_info.run_test_fn = run_long_seq_write;
Lee Susmana35ae6e2012-10-25 16:06:07 +02002935
2936 for (i = 0 ; i < number ; ++i) {
2937 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2938 test_pr_info("%s: ====================", __func__);
2939
Lee Susman70160bb2013-01-06 10:57:30 +02002940 integer = 0;
2941 fraction = 0;
2942 mbtd->test_info.test_byte_count = 0;
Lee Susmana35ae6e2012-10-25 16:06:07 +02002943 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_WRITE;
2944 mbtd->is_random = NON_RANDOM_TEST;
2945 ret = test_iosched_start_test(&mbtd->test_info);
2946 if (ret)
2947 break;
2948
2949 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
Lee Susman70160bb2013-01-06 10:57:30 +02002950 byte_count = mbtd->test_info.test_byte_count;
Lee Susmana35ae6e2012-10-25 16:06:07 +02002951
Lee Susman70160bb2013-01-06 10:57:30 +02002952 test_pr_info("%s: time is %lu msec, size is %lu.%lu MiB",
2953 __func__, mtime, LONG_TEST_SIZE_INTEGER(byte_count),
2954 LONG_TEST_SIZE_FRACTION(byte_count));
Lee Susmana35ae6e2012-10-25 16:06:07 +02002955
2956 /* we first multiply in order not to lose precision */
2957 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2958 /* divide values to get a MiB/sec integer value with one
2959 digit of precision
2960 */
Lee Susman70160bb2013-01-06 10:57:30 +02002961 fraction = integer = (byte_count * 10) / mtime;
Lee Susmana35ae6e2012-10-25 16:06:07 +02002962 integer /= 10;
2963 /* and calculate the MiB value fraction */
2964 fraction -= integer * 10;
2965
Lee Susman70160bb2013-01-06 10:57:30 +02002966 test_pr_info("%s: Throughput: %lu.%lu MiB/sec\n",
Lee Susmana35ae6e2012-10-25 16:06:07 +02002967 __func__, integer, fraction);
2968
2969 /* Allow FS requests to be dispatched */
2970 msleep(1000);
2971 }
2972
2973 return count;
2974}
2975
/*
 * debugfs read handler: prints a short description of the long
 * sequential write test, once per open (message_repeat guard), then
 * reports EOF.
 *
 * NOTE(review): writes into 'buffer' (a __user pointer) directly with
 * memset/snprintf instead of copy_to_user()/simple_read_from_buffer();
 * presumably tolerated by this debugfs setup — TODO confirm.
 */
static ssize_t long_sequential_write_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nlong_sequential_write_test\n"
		 "=========\n"
		 "Description:\n"
		 "This test runs the following scenarios\n"
		 "- Long Sequential Write Test: this test measures write "
		 "throughput at the driver level by sequentially writing many "
		 "large requests\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else
		return 0;
}
2998
/* debugfs hooks for the long sequential write test node. */
const struct file_operations long_sequential_write_test_ops = {
	.open = test_open,
	.write = long_sequential_write_test_write,
	.read = long_sequential_write_test_read,
};
3004
Lee Susmanb09c0412012-12-19 14:28:52 +02003005static ssize_t new_req_notification_test_write(struct file *file,
3006 const char __user *buf,
3007 size_t count,
3008 loff_t *ppos)
3009{
3010 int ret = 0;
3011 int i = 0;
3012 int number = -1;
3013
3014 test_pr_info("%s: -- new_req_notification TEST --", __func__);
3015
3016 sscanf(buf, "%d", &number);
3017
3018 if (number <= 0)
3019 number = 1;
3020
3021 mbtd->test_group = TEST_NEW_NOTIFICATION_GROUP;
3022
3023 memset(&mbtd->test_info, 0, sizeof(struct test_info));
3024
3025 mbtd->test_info.data = mbtd;
3026 mbtd->test_info.prepare_test_fn = prepare_new_req;
3027 mbtd->test_info.check_test_result_fn = check_new_req_result;
3028 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
3029 mbtd->test_info.run_test_fn = run_new_req;
3030 mbtd->test_info.timeout_msec = 10 * 60 * 1000; /* 1 min */
3031 mbtd->test_info.post_test_fn = new_req_post_test;
3032
3033 for (i = 0 ; i < number ; ++i) {
3034 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
3035 test_pr_info("%s: ===================", __func__);
3036 test_pr_info("%s: start test case TEST_NEW_REQ_NOTIFICATION",
3037 __func__);
3038 mbtd->test_info.testcase = TEST_NEW_REQ_NOTIFICATION;
3039 ret = test_iosched_start_test(&mbtd->test_info);
3040 if (ret) {
3041 test_pr_info("%s: break from new_req tests loop",
3042 __func__);
3043 break;
3044 }
3045 }
3046 return count;
3047}
3048
3049static ssize_t new_req_notification_test_read(struct file *file,
3050 char __user *buffer,
3051 size_t count,
3052 loff_t *offset)
3053{
3054 memset((void *)buffer, 0, count);
3055
3056 snprintf(buffer, count,
3057 "\nnew_req_notification_test\n========================\n"
3058 "Description:\n"
3059 "This test checks following scenarious\n"
3060 "- new request arrives after a NULL request was sent to the "
3061 "mmc_queue,\n"
3062 "which is waiting for completion of a former request\n");
3063
3064 return strnlen(buffer, count);
3065}
3066
/* debugfs hooks for the new-request-notification test node. */
const struct file_operations new_req_notification_test_ops = {
	.open = test_open,
	.write = new_req_notification_test_write,
	.read = new_req_notification_test_read,
};
Lee Susmana35ae6e2012-10-25 16:06:07 +02003072
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02003073static void mmc_block_test_debugfs_cleanup(void)
3074{
3075 debugfs_remove(mbtd->debug.random_test_seed);
3076 debugfs_remove(mbtd->debug.send_write_packing_test);
3077 debugfs_remove(mbtd->debug.err_check_test);
3078 debugfs_remove(mbtd->debug.send_invalid_packed_test);
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02003079 debugfs_remove(mbtd->debug.packing_control_test);
Maya Erezddc55732012-10-17 09:51:01 +02003080 debugfs_remove(mbtd->debug.discard_sanitize_test);
Yaniv Gardie9214c82012-10-18 13:58:18 +02003081 debugfs_remove(mbtd->debug.bkops_test);
Lee Susmanf18263a2012-10-24 14:14:37 +02003082 debugfs_remove(mbtd->debug.long_sequential_read_test);
Lee Susmana35ae6e2012-10-25 16:06:07 +02003083 debugfs_remove(mbtd->debug.long_sequential_write_test);
Lee Susmanb09c0412012-12-19 14:28:52 +02003084 debugfs_remove(mbtd->debug.new_req_notification_test);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02003085}
3086
3087static int mmc_block_test_debugfs_init(void)
3088{
3089 struct dentry *utils_root, *tests_root;
3090
3091 utils_root = test_iosched_get_debugfs_utils_root();
3092 tests_root = test_iosched_get_debugfs_tests_root();
3093
3094 if (!utils_root || !tests_root)
3095 return -EINVAL;
3096
3097 mbtd->debug.random_test_seed = debugfs_create_u32(
3098 "random_test_seed",
3099 S_IRUGO | S_IWUGO,
3100 utils_root,
3101 &mbtd->random_test_seed);
3102
3103 if (!mbtd->debug.random_test_seed)
3104 goto err_nomem;
3105
3106 mbtd->debug.send_write_packing_test =
3107 debugfs_create_file("send_write_packing_test",
3108 S_IRUGO | S_IWUGO,
3109 tests_root,
3110 NULL,
3111 &send_write_packing_test_ops);
3112
3113 if (!mbtd->debug.send_write_packing_test)
3114 goto err_nomem;
3115
3116 mbtd->debug.err_check_test =
3117 debugfs_create_file("err_check_test",
3118 S_IRUGO | S_IWUGO,
3119 tests_root,
3120 NULL,
3121 &err_check_test_ops);
3122
3123 if (!mbtd->debug.err_check_test)
3124 goto err_nomem;
3125
3126 mbtd->debug.send_invalid_packed_test =
3127 debugfs_create_file("send_invalid_packed_test",
3128 S_IRUGO | S_IWUGO,
3129 tests_root,
3130 NULL,
3131 &send_invalid_packed_test_ops);
3132
3133 if (!mbtd->debug.send_invalid_packed_test)
3134 goto err_nomem;
3135
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02003136 mbtd->debug.packing_control_test = debugfs_create_file(
3137 "packing_control_test",
3138 S_IRUGO | S_IWUGO,
3139 tests_root,
3140 NULL,
3141 &write_packing_control_test_ops);
3142
3143 if (!mbtd->debug.packing_control_test)
3144 goto err_nomem;
3145
Maya Erezddc55732012-10-17 09:51:01 +02003146 mbtd->debug.discard_sanitize_test =
3147 debugfs_create_file("write_discard_sanitize_test",
3148 S_IRUGO | S_IWUGO,
3149 tests_root,
3150 NULL,
3151 &write_discard_sanitize_test_ops);
3152 if (!mbtd->debug.discard_sanitize_test) {
3153 mmc_block_test_debugfs_cleanup();
3154 return -ENOMEM;
3155 }
3156
Yaniv Gardie9214c82012-10-18 13:58:18 +02003157 mbtd->debug.bkops_test =
3158 debugfs_create_file("bkops_test",
3159 S_IRUGO | S_IWUGO,
3160 tests_root,
3161 NULL,
3162 &bkops_test_ops);
3163
Lee Susmanb09c0412012-12-19 14:28:52 +02003164 mbtd->debug.new_req_notification_test =
3165 debugfs_create_file("new_req_notification_test",
3166 S_IRUGO | S_IWUGO,
3167 tests_root,
3168 NULL,
3169 &new_req_notification_test_ops);
3170
3171 if (!mbtd->debug.new_req_notification_test)
3172 goto err_nomem;
3173
Yaniv Gardie9214c82012-10-18 13:58:18 +02003174 if (!mbtd->debug.bkops_test)
3175 goto err_nomem;
3176
Lee Susmanf18263a2012-10-24 14:14:37 +02003177 mbtd->debug.long_sequential_read_test = debugfs_create_file(
3178 "long_sequential_read_test",
3179 S_IRUGO | S_IWUGO,
3180 tests_root,
3181 NULL,
3182 &long_sequential_read_test_ops);
3183
3184 if (!mbtd->debug.long_sequential_read_test)
3185 goto err_nomem;
3186
Lee Susmana35ae6e2012-10-25 16:06:07 +02003187 mbtd->debug.long_sequential_write_test = debugfs_create_file(
3188 "long_sequential_write_test",
3189 S_IRUGO | S_IWUGO,
3190 tests_root,
3191 NULL,
3192 &long_sequential_write_test_ops);
3193
3194 if (!mbtd->debug.long_sequential_write_test)
3195 goto err_nomem;
3196
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02003197 return 0;
3198
3199err_nomem:
3200 mmc_block_test_debugfs_cleanup();
3201 return -ENOMEM;
3202}
3203
3204static void mmc_block_test_probe(void)
3205{
3206 struct request_queue *q = test_iosched_get_req_queue();
3207 struct mmc_queue *mq;
3208 int max_packed_reqs;
3209
3210 if (!q) {
3211 test_pr_err("%s: NULL request queue", __func__);
3212 return;
3213 }
3214
3215 mq = q->queuedata;
3216 if (!mq) {
3217 test_pr_err("%s: NULL mq", __func__);
3218 return;
3219 }
3220
3221 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
3222 mbtd->exp_packed_stats.packing_events =
3223 kzalloc((max_packed_reqs + 1) *
3224 sizeof(*mbtd->exp_packed_stats.packing_events),
3225 GFP_KERNEL);
3226
3227 mmc_block_test_debugfs_init();
3228}
3229
/* test-iosched exit hook: tears down the module's debugfs nodes. */
static void mmc_block_test_remove(void)
{
	mmc_block_test_debugfs_cleanup();
}
3234
3235static int __init mmc_block_test_init(void)
3236{
3237 mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
3238 if (!mbtd) {
3239 test_pr_err("%s: failed to allocate mmc_block_test_data",
3240 __func__);
3241 return -ENODEV;
3242 }
3243
Yaniv Gardie9214c82012-10-18 13:58:18 +02003244 init_waitqueue_head(&mbtd->bkops_wait_q);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02003245 mbtd->bdt.init_fn = mmc_block_test_probe;
3246 mbtd->bdt.exit_fn = mmc_block_test_remove;
3247 INIT_LIST_HEAD(&mbtd->bdt.list);
3248 test_iosched_register(&mbtd->bdt);
3249
3250 return 0;
3251}
3252
/* Module exit: unregisters from test-iosched and frees the context. */
static void __exit mmc_block_test_exit(void)
{
	test_iosched_unregister(&mbtd->bdt);
	kfree(mbtd);
}
3258
3259module_init(mmc_block_test_init);
3260module_exit(mmc_block_test_exit);
3261
3262MODULE_LICENSE("GPL v2");
3263MODULE_DESCRIPTION("MMC block test");