blob: c5551b8aaac0f0f9fe12de775c54a7c4a7737934 [file] [log] [blame]
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/* MMC block test */
15
16#include <linux/module.h>
17#include <linux/blkdev.h>
18#include <linux/debugfs.h>
19#include <linux/mmc/card.h>
20#include <linux/mmc/host.h>
21#include <linux/delay.h>
22#include <linux/test-iosched.h>
Lee Susmanf18263a2012-10-24 14:14:37 +020023#include <linux/jiffies.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020024#include "queue.h"
Yaniv Gardie9214c82012-10-18 13:58:18 +020025#include <linux/mmc/mmc.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020026
/* Identifies this module in kernel log output (see test_pr_* below) */
#define MODULE_NAME "mmc_block_test"
#define TEST_MAX_SECTOR_RANGE		(600*1024*1024) /* 600 MB */
#define TEST_MAX_BIOS_PER_REQ		120
/* The "packed" flag bit (bit 30) of a CMD23 argument */
#define CMD23_PACKED_BIT	(1 << 30)
/* Multiplier/increment primes for the pseudo-random generator below */
#define LARGE_PRIME_1	1103515367
#define LARGE_PRIME_2	35757
/* Field masks of the first 32-bit word of the packed write header */
#define PACKED_HDR_VER_MASK 0x000000FF
#define PACKED_HDR_RW_MASK 0x0000FF00
#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
#define SECTOR_SIZE 512
#define NUM_OF_SECTORS_PER_BIO	((BIO_U32_SIZE * 4) / SECTOR_SIZE)
#define BIO_TO_SECTOR(x)	(x * NUM_OF_SECTORS_PER_BIO)
/* the desired long test size to be written or read */
#define LONG_TEST_MAX_NUM_BYTES (50*1024*1024) /* 50MB */
/* request queue limitation is 128 requests, and we leave 10 spare requests */
#define TEST_MAX_REQUESTS 118
#define LONG_TEST_MAX_NUM_REQS	(LONG_TEST_MAX_NUM_BYTES / \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* this doesn't allow the test requests num to be greater than the maximum */
#define LONG_TEST_ACTUAL_NUM_REQS  \
			((TEST_MAX_REQUESTS < LONG_TEST_MAX_NUM_REQS) ? \
				TEST_MAX_REQUESTS : LONG_TEST_MAX_NUM_REQS)
#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
/* actual number of bytes in test */
#define LONG_TEST_ACTUAL_BYTE_NUM  (LONG_TEST_ACTUAL_NUM_REQS * \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* actual number of MiB in test multiplied by 10, for single digit precision*/
#define LONG_TEST_ACTUAL_MB_NUM_X_10 ((LONG_TEST_ACTUAL_BYTE_NUM * 10) / \
		(1024 * 1024))
/* extract integer value */
#define LONG_TEST_SIZE_INTEGER (LONG_TEST_ACTUAL_MB_NUM_X_10 / 10)
/* and calculate the MiB value fraction */
#define LONG_TEST_SIZE_FRACTION (LONG_TEST_ACTUAL_MB_NUM_X_10 - \
		(LONG_TEST_SIZE_INTEGER * 10))

/* Logging helpers that prefix every message with the module name */
#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)

/* Timeout (msec) used by the write/discard/sanitize testcase */
#define SANITIZE_TEST_TIMEOUT 240000
#define TEST_REQUEST_NUM_OF_BIOS	3

/* Nonzero when the observed BKOPs statistics differ from the expected ones */
#define CHECK_BKOPS_STATS(stats, exp_bkops, exp_hpi, exp_suspend)	\
				   ((stats.bkops != exp_bkops) ||	\
				    (stats.hpi != exp_hpi) ||		\
				    (stats.suspend != exp_suspend))
/* Timeout (msec) used by the BKOPs testcases */
#define BKOPS_TEST_TIMEOUT 60000
Maya Erezddc55732012-10-17 09:51:01 +020076
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020077enum is_random {
78 NON_RANDOM_TEST,
79 RANDOM_TEST,
80};
81
/*
 * All testcases supported by this module.
 * Testcases are clustered into groups; each group is bounded by
 * <GROUP>_MIN_TESTCASE / <GROUP>_MAX_TESTCASE aliases of its first and
 * last member, so range checks can identify a testcase's group.
 */
enum mmc_block_test_testcases {
	/* Start of send write packing test group */
	SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_FLUSH,
	TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_EMPTY_QUEUE,
	TEST_STOP_DUE_TO_MAX_REQ_NUM,
	TEST_STOP_DUE_TO_THRESHOLD,
	SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,

	/* Start of err check test group */
	ERR_CHECK_MIN_TESTCASE,
	TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
	TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
	TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
	TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
	TEST_RET_PARTIAL_MAX_FAIL_IDX,
	TEST_RET_RETRY,
	TEST_RET_CMD_ERR,
	TEST_RET_DATA_ERR,
	ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,

	/* Start of send invalid test group */
	INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_WRONG_WRITE_CODE,
	TEST_HDR_INVALID_RW_CODE,
	TEST_HDR_DIFFERENT_ADDRESSES,
	TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
	TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
	TEST_HDR_CMD23_PACKED_BIT_SET,
	TEST_CMD23_MAX_PACKED_WRITES,
	TEST_CMD23_ZERO_PACKED_WRITES,
	TEST_CMD23_PACKED_BIT_UNSET,
	TEST_CMD23_REL_WR_BIT_SET,
	TEST_CMD23_BITS_16TO29_SET,
	TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
	INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,

	/*
	 * Start of packing control test group.
	 * in these next testcases the abbreviation FB = followed by
	 */
	PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
				PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_N_OVER_TRIGGER,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
	TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
	TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
	TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
	TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
	TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
	PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,

	/* Stand-alone testcase, not part of a bounded group */
	TEST_WRITE_DISCARD_SANITIZE_READ,

	/* Start of bkops test group */
	BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1 = BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1_HPI,
	BKOPS_CANCEL_DELAYED_WORK,
	BKOPS_URGENT_LEVEL_2,
	BKOPS_URGENT_LEVEL_2_TWO_REQS,
	BKOPS_URGENT_LEVEL_3,
	BKOPS_MAX_TESTCASE = BKOPS_URGENT_LEVEL_3,

	/* Long sequential read/write testcases (no group markers) */
	TEST_LONG_SEQUENTIAL_READ,
	TEST_LONG_SEQUENTIAL_WRITE,
};
157
/* Test group tag; stored in mmc_block_test_data.test_group */
enum mmc_block_test_group {
	TEST_NO_GROUP,
	TEST_GENERAL_GROUP,
	TEST_SEND_WRITE_PACKING_GROUP,
	TEST_ERR_CHECK_GROUP,
	TEST_SEND_INVALID_GROUP,
	TEST_PACKING_CONTROL_GROUP,
	TEST_BKOPS_GROUP,
};
167
/* Progress marker for the BKOPs testcases (see the bkops_stage field) */
enum bkops_test_stages {
	BKOPS_STAGE_1,
	BKOPS_STAGE_2,
	BKOPS_STAGE_3,
	BKOPS_STAGE_4,
};
174
/*
 * debugfs dentries created by this module, one per test entry point.
 * NOTE(review): presumably writing to an entry starts the matching test;
 * confirm against the debugfs init code (outside this chunk).
 */
struct mmc_block_test_debug {
	struct dentry *send_write_packing_test;
	struct dentry *err_check_test;
	struct dentry *send_invalid_packed_test;
	struct dentry *random_test_seed;
	struct dentry *packing_control_test;
	struct dentry *discard_sanitize_test;
	struct dentry *bkops_test;
	struct dentry *long_sequential_read_test;
	struct dentry *long_sequential_write_test;
};
186
/*
 * All run-time state of the mmc block test module, shared between the
 * debugfs entry points and the test callbacks.
 */
struct mmc_block_test_data {
	/* The number of write requests that the test will issue */
	int num_requests;
	/* The expected write packing statistics for the current test */
	struct mmc_wr_pack_stats exp_packed_stats;
	/*
	 * A user-defined seed for random choices of number of bios written in
	 * a request, and of number of requests issued in a test
	 * This field is randomly updated after each use
	 */
	unsigned int random_test_seed;
	/* A retry counter used in err_check tests */
	int err_check_counter;
	/* Can be one of the values of enum test_group */
	enum mmc_block_test_group test_group;
	/*
	 * Indicates if the current testcase is running with random values of
	 * num_requests and num_bios (in each request)
	 */
	int is_random;
	/* Data structure for debugfs dentrys */
	struct mmc_block_test_debug debug;
	/*
	 * Data structure containing individual test information, including
	 * self-defined specific data
	 */
	struct test_info test_info;
	/* mmc block device test */
	struct blk_dev_test_type bdt;
	/* Current BKOPs test stage */
	enum bkops_test_stages bkops_stage;
	/* A wait queue for BKOPs tests */
	wait_queue_head_t bkops_wait_q;
};
221
/* Single module-wide instance of the test state */
static struct mmc_block_test_data *mbtd;
223
Lee Susmane868f8a2012-11-04 15:04:41 +0200224void print_mmc_packing_stats(struct mmc_card *card)
225{
226 int i;
227 int max_num_of_packed_reqs = 0;
228
229 if ((!card) || (!card->wr_pack_stats.packing_events))
230 return;
231
232 max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
233
234 spin_lock(&card->wr_pack_stats.lock);
235
236 pr_info("%s: write packing statistics:\n",
237 mmc_hostname(card->host));
238
239 for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
240 if (card->wr_pack_stats.packing_events[i] != 0)
241 pr_info("%s: Packed %d reqs - %d times\n",
242 mmc_hostname(card->host), i,
243 card->wr_pack_stats.packing_events[i]);
244 }
245
246 pr_info("%s: stopped packing due to the following reasons:\n",
247 mmc_hostname(card->host));
248
249 if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
250 pr_info("%s: %d times: exceedmax num of segments\n",
251 mmc_hostname(card->host),
252 card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
253 if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
254 pr_info("%s: %d times: exceeding the max num of sectors\n",
255 mmc_hostname(card->host),
256 card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
257 if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
258 pr_info("%s: %d times: wrong data direction\n",
259 mmc_hostname(card->host),
260 card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
261 if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
262 pr_info("%s: %d times: flush or discard\n",
263 mmc_hostname(card->host),
264 card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
265 if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
266 pr_info("%s: %d times: empty queue\n",
267 mmc_hostname(card->host),
268 card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
269 if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
270 pr_info("%s: %d times: rel write\n",
271 mmc_hostname(card->host),
272 card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
273 if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
274 pr_info("%s: %d times: Threshold\n",
275 mmc_hostname(card->host),
276 card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
277
278 spin_unlock(&card->wr_pack_stats.lock);
279}
280
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200281/*
282 * A callback assigned to the packed_test_fn field.
283 * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
284 * Here we alter the packed header or CMD23 in order to send an invalid
285 * packed command to the card.
286 */
287static void test_invalid_packed_cmd(struct request_queue *q,
288 struct mmc_queue_req *mqrq)
289{
290 struct mmc_queue *mq = q->queuedata;
291 u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
292 struct request *req = mqrq->req;
293 struct request *second_rq;
294 struct test_request *test_rq;
295 struct mmc_blk_request *brq = &mqrq->brq;
296 int num_requests;
297 int max_packed_reqs;
298
299 if (!mq) {
300 test_pr_err("%s: NULL mq", __func__);
301 return;
302 }
303
304 test_rq = (struct test_request *)req->elv.priv[0];
305 if (!test_rq) {
306 test_pr_err("%s: NULL test_rq", __func__);
307 return;
308 }
309 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
310
311 switch (mbtd->test_info.testcase) {
312 case TEST_HDR_INVALID_VERSION:
313 test_pr_info("%s: set invalid header version", __func__);
314 /* Put 0 in header version field (1 byte, offset 0 in header) */
315 packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
316 break;
317 case TEST_HDR_WRONG_WRITE_CODE:
318 test_pr_info("%s: wrong write code", __func__);
319 /* Set R/W field with R value (1 byte, offset 1 in header) */
320 packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
321 packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
322 break;
323 case TEST_HDR_INVALID_RW_CODE:
324 test_pr_info("%s: invalid r/w code", __func__);
325 /* Set R/W field with invalid value */
326 packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
327 packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
328 break;
329 case TEST_HDR_DIFFERENT_ADDRESSES:
330 test_pr_info("%s: different addresses", __func__);
331 second_rq = list_entry(req->queuelist.next, struct request,
332 queuelist);
333 test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
334 __func__, (long)req->__sector,
335 (long)second_rq->__sector);
336 /*
337 * Put start sector of second write request in the first write
338 * request's cmd25 argument in the packed header
339 */
340 packed_cmd_hdr[3] = second_rq->__sector;
341 break;
342 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
343 test_pr_info("%s: request num smaller than actual" , __func__);
344 num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
345 >> 16;
346 /* num of entries is decremented by 1 */
347 num_requests = (num_requests - 1) << 16;
348 /*
349 * Set number of requests field in packed write header to be
350 * smaller than the actual number (1 byte, offset 2 in header)
351 */
352 packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
353 ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
354 break;
355 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
356 test_pr_info("%s: request num larger than actual" , __func__);
357 num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
358 >> 16;
359 /* num of entries is incremented by 1 */
360 num_requests = (num_requests + 1) << 16;
361 /*
362 * Set number of requests field in packed write header to be
363 * larger than the actual number (1 byte, offset 2 in header).
364 */
365 packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
366 ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
367 break;
368 case TEST_HDR_CMD23_PACKED_BIT_SET:
369 test_pr_info("%s: header CMD23 packed bit set" , __func__);
370 /*
371 * Set packed bit (bit 30) in cmd23 argument of first and second
372 * write requests in packed write header.
373 * These are located at bytes 2 and 4 in packed write header
374 */
375 packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
376 packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
377 break;
378 case TEST_CMD23_MAX_PACKED_WRITES:
379 test_pr_info("%s: CMD23 request num > max_packed_reqs",
380 __func__);
381 /*
382 * Set the individual packed cmd23 request num to
383 * max_packed_reqs + 1
384 */
385 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
386 break;
387 case TEST_CMD23_ZERO_PACKED_WRITES:
388 test_pr_info("%s: CMD23 request num = 0", __func__);
389 /* Set the individual packed cmd23 request num to zero */
390 brq->sbc.arg = MMC_CMD23_ARG_PACKED;
391 break;
392 case TEST_CMD23_PACKED_BIT_UNSET:
393 test_pr_info("%s: CMD23 packed bit unset", __func__);
394 /*
395 * Set the individual packed cmd23 packed bit to 0,
396 * although there is a packed write request
397 */
398 brq->sbc.arg &= ~CMD23_PACKED_BIT;
399 break;
400 case TEST_CMD23_REL_WR_BIT_SET:
401 test_pr_info("%s: CMD23 REL WR bit set", __func__);
402 /* Set the individual packed cmd23 reliable write bit */
403 brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
404 break;
405 case TEST_CMD23_BITS_16TO29_SET:
406 test_pr_info("%s: CMD23 bits [16-29] set", __func__);
407 brq->sbc.arg = MMC_CMD23_ARG_PACKED |
408 PACKED_HDR_BITS_16_TO_29_SET;
409 break;
410 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
411 test_pr_info("%s: CMD23 hdr not in block count", __func__);
412 brq->sbc.arg = MMC_CMD23_ARG_PACKED |
413 ((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
414 break;
415 default:
416 test_pr_err("%s: unexpected testcase %d",
417 __func__, mbtd->test_info.testcase);
418 break;
419 }
420}
421
/*
 * A callback assigned to the err_check_fn field of the mmc_request by the
 * MMC/card/block layer.
 * Called upon request completion by the MMC/core layer.
 * Here we emulate an error return value from the card.
 *
 * Several cases are multi-step: they use mbtd->err_check_counter to track
 * how many completions have been intercepted so far, and detach this
 * callback (mq->err_check_fn = NULL) once the scripted error sequence is
 * done, so subsequent completions are handled normally.
 */
static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request_queue *req_q = test_iosched_get_req_queue();
	struct mmc_queue *mq;
	int max_packed_reqs;
	int ret = 0;
	struct mmc_blk_request *brq;

	/* Any missing context means we cannot inject: report success (0) */
	if (req_q)
		mq = req_q->queuedata;
	else {
		test_pr_err("%s: NULL request_queue", __func__);
		return 0;
	}

	if (!mq) {
		test_pr_err("%s: %s: NULL mq", __func__,
			mmc_hostname(card->host));
		return 0;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	if (!mq_rq) {
		test_pr_err("%s: %s: NULL mq_rq", __func__,
			mmc_hostname(card->host));
		return 0;
	}
	brq = &mq_rq->brq;

	switch (mbtd->test_info.testcase) {
	case TEST_RET_ABORT:
		test_pr_info("%s: return abort", __func__);
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
		test_pr_info("%s: return partial followed by success",
			      __func__);
		/*
		 * Since in this testcase num_requests is always >= 2,
		 * we can be sure that packed_fail_idx is always >= 1
		 */
		mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
		test_pr_info("%s: packed_fail_idx = %d"
			, __func__, mq_rq->packed_fail_idx);
		/* Inject only once: detach before the retried completion */
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
		/* First completion: report a partial failure */
		if (!mbtd->err_check_counter) {
			test_pr_info("%s: return partial followed by abort",
				      __func__);
			mbtd->err_check_counter++;
			/*
			 * Since in this testcase num_requests is always >= 3,
			 * we have that packed_fail_idx is always >= 1
			 */
			mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
			test_pr_info("%s: packed_fail_idx = %d"
				, __func__, mq_rq->packed_fail_idx);
			ret = MMC_BLK_PARTIAL;
			break;
		}
		/* Second completion: reset state, detach, and abort */
		mbtd->err_check_counter = 0;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
		test_pr_info("%s: return partial multiple until success",
			     __func__);
		/* After num_requests partials, stop intercepting */
		if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
			mq->err_check_fn = NULL;
			mbtd->err_check_counter = 0;
			ret = MMC_BLK_PARTIAL;
			break;
		}
		/* Keep failing at the second packed entry on each retry */
		mq_rq->packed_fail_idx = 1;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
		test_pr_info("%s: return partial max fail_idx", __func__);
		/* Fail at the last possible packed entry */
		mq_rq->packed_fail_idx = max_packed_reqs - 1;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_RETRY:
		test_pr_info("%s: return retry", __func__);
		ret = MMC_BLK_RETRY;
		break;
	case TEST_RET_CMD_ERR:
		test_pr_info("%s: return cmd err", __func__);
		ret = MMC_BLK_CMD_ERR;
		break;
	case TEST_RET_DATA_ERR:
		test_pr_info("%s: return data err", __func__);
		ret = MMC_BLK_DATA_ERR;
		break;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/*
		 * On the first completion only, fake an exception bit in the
		 * card's R1 response (presumably this makes the core start
		 * its BKOPs handling -- confirm against the MMC core).
		 */
		if (mbtd->err_check_counter++ == 0) {
			test_pr_info("%s: simulate an exception from the card",
				     __func__);
			brq->cmd.resp[0] |= R1_EXCEPTION_EVENT;
		}
		mq->err_check_fn = NULL;
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
	}

	return ret;
}
544
545/*
546 * This is a specific implementation for the get_test_case_str_fn function
547 * pointer in the test_info data structure. Given a valid test_data instance,
548 * the function returns a string resembling the test name, based on the testcase
549 */
550static char *get_test_case_str(struct test_data *td)
551{
552 if (!td) {
553 test_pr_err("%s: NULL td", __func__);
554 return NULL;
555 }
556
557 switch (td->test_info.testcase) {
558 case TEST_STOP_DUE_TO_FLUSH:
Lee Susmane868f8a2012-11-04 15:04:41 +0200559 return " stop due to flush";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200560 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
Lee Susmane868f8a2012-11-04 15:04:41 +0200561 return " stop due to flush after max-1 reqs";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200562 case TEST_STOP_DUE_TO_READ:
Lee Susmane868f8a2012-11-04 15:04:41 +0200563 return " stop due to read";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200564 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
565 return "Test stop due to read after max-1 reqs";
566 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
567 return "Test stop due to empty queue";
568 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
569 return "Test stop due to max req num";
570 case TEST_STOP_DUE_TO_THRESHOLD:
571 return "Test stop due to exceeding threshold";
572 case TEST_RET_ABORT:
573 return "Test err_check return abort";
574 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
575 return "Test err_check return partial followed by success";
576 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
577 return "Test err_check return partial followed by abort";
578 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
579 return "Test err_check return partial multiple until success";
580 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
581 return "Test err_check return partial max fail index";
582 case TEST_RET_RETRY:
583 return "Test err_check return retry";
584 case TEST_RET_CMD_ERR:
585 return "Test err_check return cmd error";
586 case TEST_RET_DATA_ERR:
587 return "Test err_check return data error";
588 case TEST_HDR_INVALID_VERSION:
589 return "Test invalid - wrong header version";
590 case TEST_HDR_WRONG_WRITE_CODE:
591 return "Test invalid - wrong write code";
592 case TEST_HDR_INVALID_RW_CODE:
593 return "Test invalid - wrong R/W code";
594 case TEST_HDR_DIFFERENT_ADDRESSES:
595 return "Test invalid - header different addresses";
596 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
597 return "Test invalid - header req num smaller than actual";
598 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
599 return "Test invalid - header req num larger than actual";
600 case TEST_HDR_CMD23_PACKED_BIT_SET:
601 return "Test invalid - header cmd23 packed bit set";
602 case TEST_CMD23_MAX_PACKED_WRITES:
603 return "Test invalid - cmd23 max packed writes";
604 case TEST_CMD23_ZERO_PACKED_WRITES:
605 return "Test invalid - cmd23 zero packed writes";
606 case TEST_CMD23_PACKED_BIT_UNSET:
607 return "Test invalid - cmd23 packed bit unset";
608 case TEST_CMD23_REL_WR_BIT_SET:
609 return "Test invalid - cmd23 rel wr bit set";
610 case TEST_CMD23_BITS_16TO29_SET:
611 return "Test invalid - cmd23 bits [16-29] set";
612 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
613 return "Test invalid - cmd23 header block not in count";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200614 case TEST_PACKING_EXP_N_OVER_TRIGGER:
615 return "\nTest packing control - pack n";
616 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
617 return "\nTest packing control - pack n followed by read";
618 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
619 return "\nTest packing control - pack n followed by flush";
620 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
621 return "\nTest packing control - pack one followed by read";
622 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
623 return "\nTest packing control - pack threshold";
624 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
625 return "\nTest packing control - no packing";
626 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
627 return "\nTest packing control - no packing, trigger requests";
628 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
629 return "\nTest packing control - no pack, trigger-read-trigger";
630 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
631 return "\nTest packing control- no pack, trigger-flush-trigger";
632 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
633 return "\nTest packing control - mix: pack -> no pack -> pack";
634 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
635 return "\nTest packing control - mix: no pack->pack->no pack";
Maya Erezddc55732012-10-17 09:51:01 +0200636 case TEST_WRITE_DISCARD_SANITIZE_READ:
637 return "\nTest write, discard, sanitize";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200638 case BKOPS_DELAYED_WORK_LEVEL_1:
639 return "\nTest delayed work BKOPS level 1";
640 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
641 return "\nTest delayed work BKOPS level 1 with HPI";
642 case BKOPS_CANCEL_DELAYED_WORK:
643 return "\nTest cancel delayed BKOPS work";
644 case BKOPS_URGENT_LEVEL_2:
645 return "\nTest urgent BKOPS level 2";
646 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
647 return "\nTest urgent BKOPS level 2, followed by a request";
648 case BKOPS_URGENT_LEVEL_3:
649 return "\nTest urgent BKOPS level 3";
Lee Susmanf18263a2012-10-24 14:14:37 +0200650 case TEST_LONG_SEQUENTIAL_READ:
651 return "Test long sequential read";
Lee Susmana35ae6e2012-10-25 16:06:07 +0200652 case TEST_LONG_SEQUENTIAL_WRITE:
653 return "Test long sequential write";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200654 default:
655 return "Unknown testcase";
656 }
657
658 return NULL;
659}
660
661/*
662 * Compare individual testcase's statistics to the expected statistics:
663 * Compare stop reason and number of packing events
664 */
665static int check_wr_packing_statistics(struct test_data *td)
666{
667 struct mmc_wr_pack_stats *mmc_packed_stats;
668 struct mmc_queue *mq = td->req_q->queuedata;
669 int max_packed_reqs = mq->card->ext_csd.max_packed_writes;
670 int i;
671 struct mmc_card *card = mq->card;
672 struct mmc_wr_pack_stats expected_stats;
673 int *stop_reason;
674 int ret = 0;
675
676 if (!mq) {
677 test_pr_err("%s: NULL mq", __func__);
678 return -EINVAL;
679 }
680
681 expected_stats = mbtd->exp_packed_stats;
682
683 mmc_packed_stats = mmc_blk_get_packed_statistics(card);
684 if (!mmc_packed_stats) {
685 test_pr_err("%s: NULL mmc_packed_stats", __func__);
686 return -EINVAL;
687 }
688
689 if (!mmc_packed_stats->packing_events) {
690 test_pr_err("%s: NULL packing_events", __func__);
691 return -EINVAL;
692 }
693
694 spin_lock(&mmc_packed_stats->lock);
695
696 if (!mmc_packed_stats->enabled) {
697 test_pr_err("%s write packing statistics are not enabled",
698 __func__);
699 ret = -EINVAL;
700 goto exit_err;
701 }
702
703 stop_reason = mmc_packed_stats->pack_stop_reason;
704
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200705 for (i = 1; i <= max_packed_reqs; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200706 if (mmc_packed_stats->packing_events[i] !=
707 expected_stats.packing_events[i]) {
708 test_pr_err(
709 "%s: Wrong pack stats in index %d, got %d, expected %d",
710 __func__, i, mmc_packed_stats->packing_events[i],
711 expected_stats.packing_events[i]);
712 if (td->fs_wr_reqs_during_test)
713 goto cancel_round;
714 ret = -EINVAL;
715 goto exit_err;
716 }
717 }
718
719 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
720 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
721 test_pr_err(
722 "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
723 __func__, stop_reason[EXCEEDS_SEGMENTS],
724 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
725 if (td->fs_wr_reqs_during_test)
726 goto cancel_round;
727 ret = -EINVAL;
728 goto exit_err;
729 }
730
731 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
732 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
733 test_pr_err(
734 "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
735 __func__, stop_reason[EXCEEDS_SECTORS],
736 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
737 if (td->fs_wr_reqs_during_test)
738 goto cancel_round;
739 ret = -EINVAL;
740 goto exit_err;
741 }
742
743 if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
744 expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
745 test_pr_err(
746 "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
747 __func__, stop_reason[WRONG_DATA_DIR],
748 expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
749 if (td->fs_wr_reqs_during_test)
750 goto cancel_round;
751 ret = -EINVAL;
752 goto exit_err;
753 }
754
755 if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
756 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
757 test_pr_err(
758 "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
759 __func__, stop_reason[FLUSH_OR_DISCARD],
760 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
761 if (td->fs_wr_reqs_during_test)
762 goto cancel_round;
763 ret = -EINVAL;
764 goto exit_err;
765 }
766
767 if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
768 expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
769 test_pr_err(
770 "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
771 __func__, stop_reason[EMPTY_QUEUE],
772 expected_stats.pack_stop_reason[EMPTY_QUEUE]);
773 if (td->fs_wr_reqs_during_test)
774 goto cancel_round;
775 ret = -EINVAL;
776 goto exit_err;
777 }
778
779 if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
780 expected_stats.pack_stop_reason[REL_WRITE]) {
781 test_pr_err(
782 "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
783 __func__, stop_reason[REL_WRITE],
784 expected_stats.pack_stop_reason[REL_WRITE]);
785 if (td->fs_wr_reqs_during_test)
786 goto cancel_round;
787 ret = -EINVAL;
788 goto exit_err;
789 }
790
791exit_err:
792 spin_unlock(&mmc_packed_stats->lock);
793 if (ret && mmc_packed_stats->enabled)
794 print_mmc_packing_stats(card);
795 return ret;
796cancel_round:
797 spin_unlock(&mmc_packed_stats->lock);
798 test_iosched_set_ignore_round(true);
799 return 0;
800}
801
802/*
803 * Pseudo-randomly choose a seed based on the last seed, and update it in
804 * seed_number. then return seed_number (mod max_val), or min_val.
805 */
806static unsigned int pseudo_random_seed(unsigned int *seed_number,
807 unsigned int min_val,
808 unsigned int max_val)
809{
810 int ret = 0;
811
812 if (!seed_number)
813 return 0;
814
815 *seed_number = ((unsigned int)(((unsigned long)*seed_number *
816 (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
817 ret = (unsigned int)((*seed_number) % max_val);
818
819 return (ret > min_val ? ret : min_val);
820}
821
822/*
823 * Given a pseudo-random seed, find a pseudo-random num_of_bios.
824 * Make sure that num_of_bios is not larger than TEST_MAX_SECTOR_RANGE
825 */
826static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
827 unsigned int *num_of_bios)
828{
829 do {
830 *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
831 TEST_MAX_BIOS_PER_REQ);
832 if (!(*num_of_bios))
833 *num_of_bios = 1;
834 } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
835}
836
837/* Add a single read request to the given td's request queue */
838static int prepare_request_add_read(struct test_data *td)
839{
840 int ret;
841 int start_sec;
842
843 if (td)
844 start_sec = td->start_sector;
845 else {
846 test_pr_err("%s: NULL td", __func__);
847 return 0;
848 }
849
850 test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
851 td->wr_rd_next_req_id);
852
853 ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
854 TEST_PATTERN_5A, NULL);
855 if (ret) {
856 test_pr_err("%s: failed to add a read request", __func__);
857 return ret;
858 }
859
860 return 0;
861}
862
863/* Add a single flush request to the given td's request queue */
864static int prepare_request_add_flush(struct test_data *td)
865{
866 int ret;
867
868 if (!td) {
869 test_pr_err("%s: NULL td", __func__);
870 return 0;
871 }
872
873 test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
874 td->unique_next_req_id);
875 ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
876 0, 0, NULL);
877 if (ret) {
878 test_pr_err("%s: failed to add a flush request", __func__);
879 return ret;
880 }
881
882 return ret;
883}
884
885/*
886 * Add num_requets amount of write requests to the given td's request queue.
887 * If random test mode is chosen we pseudo-randomly choose the number of bios
888 * for each write request, otherwise add between 1 to 5 bio per request.
889 */
890static int prepare_request_add_write_reqs(struct test_data *td,
891 int num_requests, int is_err_expected,
892 int is_random)
893{
894 int i;
895 unsigned int start_sec;
896 int num_bios;
897 int ret = 0;
898 unsigned int *bio_seed = &mbtd->random_test_seed;
899
900 if (td)
901 start_sec = td->start_sector;
902 else {
903 test_pr_err("%s: NULL td", __func__);
904 return ret;
905 }
906
907 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
908 num_requests, td->wr_rd_next_req_id);
909
Lee Susmanf18263a2012-10-24 14:14:37 +0200910 for (i = 1 ; i <= num_requests ; i++) {
911 start_sec =
912 td->start_sector + sizeof(int) *
913 BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200914 if (is_random)
915 pseudo_rnd_num_of_bios(bio_seed, &num_bios);
916 else
917 /*
918 * For the non-random case, give num_bios a value
919 * between 1 and 5, to keep a small number of BIOs
920 */
921 num_bios = (i%5)+1;
922
923 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
924 start_sec, num_bios, TEST_PATTERN_5A, NULL);
925
926 if (ret) {
927 test_pr_err("%s: failed to add a write request",
928 __func__);
929 return ret;
930 }
931 }
932 return 0;
933}
934
935/*
936 * Prepare the write, read and flush requests for a generic packed commands
937 * testcase
938 */
939static int prepare_packed_requests(struct test_data *td, int is_err_expected,
940 int num_requests, int is_random)
941{
942 int ret = 0;
943 struct mmc_queue *mq;
944 int max_packed_reqs;
945 struct request_queue *req_q;
946
947 if (!td) {
948 pr_err("%s: NULL td", __func__);
949 return -EINVAL;
950 }
951
952 req_q = td->req_q;
953
954 if (!req_q) {
955 pr_err("%s: NULL request queue", __func__);
956 return -EINVAL;
957 }
958
959 mq = req_q->queuedata;
960 if (!mq) {
961 test_pr_err("%s: NULL mq", __func__);
962 return -EINVAL;
963 }
964
965 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
966
967 if (mbtd->random_test_seed <= 0) {
968 mbtd->random_test_seed =
969 (unsigned int)(get_jiffies_64() & 0xFFFF);
970 test_pr_info("%s: got seed from jiffies %d",
971 __func__, mbtd->random_test_seed);
972 }
973
974 mmc_blk_init_packed_statistics(mq->card);
975
976 ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
977 is_random);
978 if (ret)
979 return ret;
980
981 /* Avoid memory corruption in upcoming stats set */
982 if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
983 num_requests--;
984
985 memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
986 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
987 memset(mbtd->exp_packed_stats.packing_events, 0,
988 (max_packed_reqs + 1) * sizeof(u32));
989 if (num_requests <= max_packed_reqs)
990 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
991
992 switch (td->test_info.testcase) {
993 case TEST_STOP_DUE_TO_FLUSH:
994 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
995 ret = prepare_request_add_flush(td);
996 if (ret)
997 return ret;
998
999 mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
1000 break;
1001 case TEST_STOP_DUE_TO_READ:
1002 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1003 ret = prepare_request_add_read(td);
1004 if (ret)
1005 return ret;
1006
1007 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1008 break;
1009 case TEST_STOP_DUE_TO_THRESHOLD:
1010 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1011 mbtd->exp_packed_stats.packing_events[1] = 1;
1012 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
1013 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1014 break;
1015 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1016 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1017 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
1018 break;
1019 default:
1020 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1021 }
1022 mbtd->num_requests = num_requests;
1023
1024 return 0;
1025}
1026
/*
 * Prepare the write, read and flush requests for the packing control
 * testcases.
 *
 * Issues the write requests that should (or should not) trigger write
 * packing, interleaved with reads/flushes according to the testcase, and
 * records in mbtd->exp_packed_stats the packing events and stop reasons the
 * result-check stage expects to find.
 */
static int prepare_packed_control_tests_requests(struct test_data *td,
			int is_err_expected, int num_requests, int is_random)
{
	int ret = 0;
	struct mmc_queue *mq;
	int max_packed_reqs;
	int temp_num_req = num_requests;
	struct request_queue *req_q;
	int test_packed_trigger;
	int num_packed_reqs;

	if (!td) {
		test_pr_err("%s: NULL td\n", __func__);
		return -EINVAL;
	}

	req_q = td->req_q;

	if (!req_q) {
		test_pr_err("%s: NULL request queue\n", __func__);
		return -EINVAL;
	}

	mq = req_q->queuedata;
	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
	/* Requests issued beyond the packing trigger are the packed ones */
	num_packed_reqs = num_requests - test_packed_trigger;

	if (mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			__func__, mbtd->random_test_seed);
	}

	mmc_blk_init_packed_statistics(mq->card);

	if (td->test_info.testcase ==
			TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
		/* Save the requested count; restored after the first batch */
		temp_num_req = num_requests;
		num_requests = test_packed_trigger - 1;
	}

	/* Verify that the packing is disabled before starting the test */
	mq->wr_packing_enabled = false;
	mq->num_of_potential_packed_wr_reqs = 0;

	if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
		/* This testcase starts with packing already enabled */
		mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
		mq->wr_packing_enabled = true;
		num_requests = test_packed_trigger + 2;
	}

	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
					     is_random);
	if (ret)
		goto exit;

	if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
		num_requests = temp_num_req;

	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
	memset(mbtd->exp_packed_stats.packing_events, 0,
		(max_packed_reqs + 1) * sizeof(u32));

	switch (td->test_info.testcase) {
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
		/* A read after the writes stops packing (wrong data dir) */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
		/* A flush splits the writes into two packed batches */
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_packed_reqs,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
		/* A read between two below-trigger batches: no packing */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
		/* A flush between two below-trigger batches: no packing */
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
		mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		break;
	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
		/* No packing expected: a single event of the issued size */
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	default:
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
	}
	mbtd->num_requests = num_requests;

exit:
	return ret;
}
1205
1206/*
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001207 * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
1208 * In this testcase we have mixed error expectations from different
1209 * write requests, hence the special prepare function.
1210 */
1211static int prepare_partial_followed_by_abort(struct test_data *td,
1212 int num_requests)
1213{
1214 int i, start_address;
1215 int is_err_expected = 0;
1216 int ret = 0;
1217 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1218 int max_packed_reqs;
1219
1220 if (!mq) {
1221 test_pr_err("%s: NULL mq", __func__);
1222 return -EINVAL;
1223 }
1224
1225 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1226
1227 mmc_blk_init_packed_statistics(mq->card);
1228
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001229 for (i = 1; i <= num_requests; i++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001230 if (i > (num_requests / 2))
1231 is_err_expected = 1;
1232
Lee Susmanf18263a2012-10-24 14:14:37 +02001233 start_address = td->start_sector +
1234 sizeof(int) * BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001235 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001236 start_address, (i % 5) + 1, TEST_PATTERN_5A,
1237 NULL);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001238 if (ret) {
1239 test_pr_err("%s: failed to add a write request",
1240 __func__);
1241 return ret;
1242 }
1243 }
1244
1245 memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
1246 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1247 memset(mbtd->exp_packed_stats.packing_events, 0,
1248 (max_packed_reqs + 1) * sizeof(u32));
1249 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1250 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1251
1252 mbtd->num_requests = num_requests;
1253
1254 return ret;
1255}
1256
1257/*
1258 * Get number of write requests for current testcase. If random test mode was
1259 * chosen, pseudo-randomly choose the number of requests, otherwise set to
1260 * two less than the packing threshold.
1261 */
1262static int get_num_requests(struct test_data *td)
1263{
1264 int *seed = &mbtd->random_test_seed;
1265 struct request_queue *req_q;
1266 struct mmc_queue *mq;
1267 int max_num_requests;
1268 int num_requests;
1269 int min_num_requests = 2;
1270 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001271 int max_for_double;
1272 int test_packed_trigger;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001273
1274 req_q = test_iosched_get_req_queue();
1275 if (req_q)
1276 mq = req_q->queuedata;
1277 else {
1278 test_pr_err("%s: NULL request queue", __func__);
1279 return 0;
1280 }
1281
1282 if (!mq) {
1283 test_pr_err("%s: NULL mq", __func__);
1284 return -EINVAL;
1285 }
1286
1287 max_num_requests = mq->card->ext_csd.max_packed_writes;
1288 num_requests = max_num_requests - 2;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001289 test_packed_trigger = mq->num_wr_reqs_to_start_packing;
1290
1291 /*
1292 * Here max_for_double is intended for packed control testcases
1293 * in which we issue many write requests. It's purpose is to prevent
1294 * exceeding max number of req_queue requests.
1295 */
1296 max_for_double = max_num_requests - 10;
1297
1298 if (td->test_info.testcase ==
1299 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1300 /* Don't expect packing, so issue up to trigger-1 reqs */
1301 num_requests = test_packed_trigger - 1;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001302
1303 if (is_random) {
1304 if (td->test_info.testcase ==
1305 TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001306 /*
1307 * Here we don't want num_requests to be less than 1
1308 * as a consequence of division by 2.
1309 */
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001310 min_num_requests = 3;
1311
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001312 if (td->test_info.testcase ==
1313 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1314 /* Don't expect packing, so issue up to trigger reqs */
1315 max_num_requests = test_packed_trigger;
1316
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001317 num_requests = pseudo_random_seed(seed, min_num_requests,
1318 max_num_requests - 1);
1319 }
1320
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001321 if (td->test_info.testcase ==
1322 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1323 num_requests -= test_packed_trigger;
1324
1325 if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
1326 num_requests =
1327 num_requests > max_for_double ? max_for_double : num_requests;
1328
1329 if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
1330 num_requests += test_packed_trigger;
1331
1332 if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
1333 num_requests = test_packed_trigger;
1334
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001335 return num_requests;
1336}
1337
Lee Susmanf18263a2012-10-24 14:14:37 +02001338static int prepare_long_test_requests(struct test_data *td)
1339{
1340
1341 int ret;
1342 int start_sec;
1343 int j;
1344 int test_direction;
1345
1346 if (td)
1347 start_sec = td->start_sector;
1348 else {
1349 test_pr_err("%s: NULL td\n", __func__);
1350 return -EINVAL;
1351 }
1352
Lee Susmana35ae6e2012-10-25 16:06:07 +02001353 if (td->test_info.testcase == TEST_LONG_SEQUENTIAL_WRITE)
1354 test_direction = WRITE;
1355 else
1356 test_direction = READ;
Lee Susmanf18263a2012-10-24 14:14:37 +02001357
Lee Susmana35ae6e2012-10-25 16:06:07 +02001358 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
Lee Susmanf18263a2012-10-24 14:14:37 +02001359 LONG_TEST_ACTUAL_NUM_REQS, td->wr_rd_next_req_id);
1360
1361 for (j = 0; j < LONG_TEST_ACTUAL_NUM_REQS; j++) {
1362
1363 ret = test_iosched_add_wr_rd_test_req(0, test_direction,
1364 start_sec,
1365 TEST_MAX_BIOS_PER_REQ,
1366 TEST_NO_PATTERN, NULL);
1367 if (ret) {
1368 test_pr_err("%s: failed to add a bio request",
1369 __func__);
1370 return ret;
1371 }
1372
1373 start_sec +=
1374 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE);
1375 }
1376
1377 return 0;
1378}
1379
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001380/*
1381 * An implementation for the prepare_test_fn pointer in the test_info
1382 * data structure. According to the testcase we add the right number of requests
1383 * and decide if an error is expected or not.
1384 */
1385static int prepare_test(struct test_data *td)
1386{
1387 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1388 int max_num_requests;
1389 int num_requests = 0;
1390 int ret = 0;
1391 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001392 int test_packed_trigger = mq->num_wr_reqs_to_start_packing;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001393
1394 if (!mq) {
1395 test_pr_err("%s: NULL mq", __func__);
1396 return -EINVAL;
1397 }
1398
1399 max_num_requests = mq->card->ext_csd.max_packed_writes;
1400
1401 if (is_random && mbtd->random_test_seed == 0) {
1402 mbtd->random_test_seed =
1403 (unsigned int)(get_jiffies_64() & 0xFFFF);
1404 test_pr_info("%s: got seed from jiffies %d",
1405 __func__, mbtd->random_test_seed);
1406 }
1407
1408 num_requests = get_num_requests(td);
1409
1410 if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
1411 mq->packed_test_fn =
1412 test_invalid_packed_cmd;
1413
1414 if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
1415 mq->err_check_fn = test_err_check;
1416
1417 switch (td->test_info.testcase) {
1418 case TEST_STOP_DUE_TO_FLUSH:
1419 case TEST_STOP_DUE_TO_READ:
1420 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
1421 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
1422 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
1423 case TEST_CMD23_PACKED_BIT_UNSET:
1424 ret = prepare_packed_requests(td, 0, num_requests, is_random);
1425 break;
1426 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1427 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1428 ret = prepare_packed_requests(td, 0, max_num_requests - 1,
1429 is_random);
1430 break;
1431 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
1432 ret = prepare_partial_followed_by_abort(td, num_requests);
1433 break;
1434 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1435 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1436 ret = prepare_packed_requests(td, 0, max_num_requests,
1437 is_random);
1438 break;
1439 case TEST_STOP_DUE_TO_THRESHOLD:
1440 ret = prepare_packed_requests(td, 0, max_num_requests + 1,
1441 is_random);
1442 break;
1443 case TEST_RET_ABORT:
1444 case TEST_RET_RETRY:
1445 case TEST_RET_CMD_ERR:
1446 case TEST_RET_DATA_ERR:
1447 case TEST_HDR_INVALID_VERSION:
1448 case TEST_HDR_WRONG_WRITE_CODE:
1449 case TEST_HDR_INVALID_RW_CODE:
1450 case TEST_HDR_DIFFERENT_ADDRESSES:
1451 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
1452 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
1453 case TEST_CMD23_MAX_PACKED_WRITES:
1454 case TEST_CMD23_ZERO_PACKED_WRITES:
1455 case TEST_CMD23_REL_WR_BIT_SET:
1456 case TEST_CMD23_BITS_16TO29_SET:
1457 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
1458 case TEST_HDR_CMD23_PACKED_BIT_SET:
1459 ret = prepare_packed_requests(td, 1, num_requests, is_random);
1460 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001461 case TEST_PACKING_EXP_N_OVER_TRIGGER:
1462 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
1463 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
1464 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
1465 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
1466 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
1467 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1468 is_random);
1469 break;
1470 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
1471 ret = prepare_packed_control_tests_requests(td, 0,
1472 max_num_requests, is_random);
1473 break;
1474 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
1475 ret = prepare_packed_control_tests_requests(td, 0,
1476 test_packed_trigger + 1,
1477 is_random);
1478 break;
1479 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
1480 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1481 is_random);
1482 break;
1483 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
1484 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
1485 ret = prepare_packed_control_tests_requests(td, 0,
1486 test_packed_trigger, is_random);
1487 break;
Lee Susmana35ae6e2012-10-25 16:06:07 +02001488 case TEST_LONG_SEQUENTIAL_WRITE:
1489 ret = prepare_long_test_requests(td);
1490 break;
Lee Susmanf18263a2012-10-24 14:14:37 +02001491 case TEST_LONG_SEQUENTIAL_READ:
1492 ret = prepare_long_test_requests(td);
1493 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001494 default:
1495 test_pr_info("%s: Invalid test case...", __func__);
Lee Susmanf18263a2012-10-24 14:14:37 +02001496 ret = -EINVAL;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001497 }
1498
1499 return ret;
1500}
1501
1502/*
1503 * An implementation for the post_test_fn in the test_info data structure.
1504 * In our case we just reset the function pointers in the mmc_queue in order for
1505 * the FS to be able to dispatch it's requests correctly after the test is
1506 * finished.
1507 */
1508static int post_test(struct test_data *td)
1509{
1510 struct mmc_queue *mq;
1511
1512 if (!td)
1513 return -EINVAL;
1514
1515 mq = td->req_q->queuedata;
1516
1517 if (!mq) {
1518 test_pr_err("%s: NULL mq", __func__);
1519 return -EINVAL;
1520 }
1521
1522 mq->packed_test_fn = NULL;
1523 mq->err_check_fn = NULL;
1524
1525 return 0;
1526}
1527
1528/*
1529 * This function checks, based on the current test's test_group, that the
1530 * packed commands capability and control are set right. In addition, we check
1531 * if the card supports the packed command feature.
1532 */
1533static int validate_packed_commands_settings(void)
1534{
1535 struct request_queue *req_q;
1536 struct mmc_queue *mq;
1537 int max_num_requests;
1538 struct mmc_host *host;
1539
1540 req_q = test_iosched_get_req_queue();
1541 if (!req_q) {
1542 test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
1543 test_iosched_set_test_result(TEST_FAILED);
1544 return -EINVAL;
1545 }
1546
1547 mq = req_q->queuedata;
1548 if (!mq) {
1549 test_pr_err("%s: NULL mq", __func__);
1550 return -EINVAL;
1551 }
1552
1553 max_num_requests = mq->card->ext_csd.max_packed_writes;
1554 host = mq->card->host;
1555
1556 if (!(host->caps2 && MMC_CAP2_PACKED_WR)) {
1557 test_pr_err("%s: Packed Write capability disabled, exit test",
1558 __func__);
1559 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1560 return -EINVAL;
1561 }
1562
1563 if (max_num_requests == 0) {
1564 test_pr_err(
1565 "%s: no write packing support, ext_csd.max_packed_writes=%d",
1566 __func__, mq->card->ext_csd.max_packed_writes);
1567 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1568 return -EINVAL;
1569 }
1570
1571 test_pr_info("%s: max number of packed requests supported is %d ",
1572 __func__, max_num_requests);
1573
1574 switch (mbtd->test_group) {
1575 case TEST_SEND_WRITE_PACKING_GROUP:
1576 case TEST_ERR_CHECK_GROUP:
1577 case TEST_SEND_INVALID_GROUP:
1578 /* disable the packing control */
1579 host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
1580 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001581 case TEST_PACKING_CONTROL_GROUP:
1582 host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
1583 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001584 default:
1585 break;
1586 }
1587
1588 return 0;
1589}
1590
Maya Erezddc55732012-10-17 09:51:01 +02001591static void pseudo_rnd_sector_and_size(unsigned int *seed,
1592 unsigned int min_start_sector,
1593 unsigned int *start_sector,
1594 unsigned int *num_of_bios)
1595{
1596 unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
1597 do {
1598 *start_sector = pseudo_random_seed(seed,
1599 1, max_sec);
1600 *num_of_bios = pseudo_random_seed(seed,
1601 1, TEST_MAX_BIOS_PER_REQ);
1602 if (!(*num_of_bios))
1603 *num_of_bios = 1;
1604 } while ((*start_sector < min_start_sector) ||
1605 (*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
1606}
1607
1608/* sanitize test functions */
1609static int prepare_write_discard_sanitize_read(struct test_data *td)
1610{
1611 unsigned int start_sector;
1612 unsigned int num_of_bios = 0;
1613 static unsigned int total_bios;
1614 unsigned int *num_bios_seed;
1615 int i = 0;
1616
1617 if (mbtd->random_test_seed == 0) {
1618 mbtd->random_test_seed =
1619 (unsigned int)(get_jiffies_64() & 0xFFFF);
1620 test_pr_info("%s: got seed from jiffies %d",
1621 __func__, mbtd->random_test_seed);
1622 }
1623 num_bios_seed = &mbtd->random_test_seed;
1624
1625 do {
1626 pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
1627 &start_sector, &num_of_bios);
1628
1629 /* DISCARD */
1630 total_bios += num_of_bios;
1631 test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
1632 __func__, td->unique_next_req_id, start_sector,
1633 num_of_bios);
1634 test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
1635 start_sector, BIO_TO_SECTOR(num_of_bios),
1636 NULL);
1637
1638 } while (++i < (BLKDEV_MAX_RQ-10));
1639
1640 test_pr_info("%s: total discard bios = %d", __func__, total_bios);
1641
1642 test_pr_info("%s: add sanitize req", __func__);
1643 test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);
1644
1645 return 0;
1646}
1647
Yaniv Gardie9214c82012-10-18 13:58:18 +02001648/*
1649 * Post test operations for BKOPs test
1650 * Disable the BKOPs statistics and clear the feature flags
1651 */
1652static int bkops_post_test(struct test_data *td)
1653{
1654 struct request_queue *q = td->req_q;
1655 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1656 struct mmc_card *card = mq->card;
1657
1658 mmc_card_clr_doing_bkops(mq->card);
1659 card->ext_csd.raw_bkops_status = 0;
1660
1661 spin_lock(&card->bkops_info.bkops_stats.lock);
1662 card->bkops_info.bkops_stats.enabled = false;
1663 spin_unlock(&card->bkops_info.bkops_stats.lock);
1664
1665 return 0;
1666}
1667
/*
 * Verify the BKOPS statistics gathered during the test against the
 * expectations of the current testcase.
 *
 * Returns 0 on a match (or when the round is ignored because FS write
 * requests arrived during the test), -EINVAL otherwise.
 */
static int check_bkops_result(struct test_data *td)
{
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		goto fail;

	bkops_stat = &card->bkops_info.bkops_stats;

	test_pr_info("%s: Test results: bkops:(%d,%d,%d) hpi:%d, suspend:%d",
			__func__,
			bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX],
			bkops_stat->hpi,
			bkops_stat->suspend);

	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* Expect one level-1 BKOPS with a suspend and no HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 1) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
		break;
	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* Expect one level-1 BKOPS interrupted by HPI, no suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 1))
			goto exit;
		else
			goto fail;
		break;
	case BKOPS_CANCEL_DELAYED_WORK:
		/* Delayed work was cancelled: expect no BKOPS activity */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 0) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* Expect one urgent level-2 BKOPS, no suspend and no HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_3:
		/* Expect one urgent level-3 BKOPS, no suspend and no HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	default:
		return -EINVAL;
	}

exit:
	return 0;
fail:
	if (td->fs_wr_reqs_during_test) {
		test_pr_info("%s: wr reqs during test, cancel the round",
				__func__);
		test_iosched_set_ignore_round(true);
		return 0;
	}

	test_pr_info("%s: BKOPs statistics are not as expected, test failed",
			__func__);
	return -EINVAL;
}
1750
1751static void bkops_end_io_final_fn(struct request *rq, int err)
1752{
1753 struct test_request *test_rq =
1754 (struct test_request *)rq->elv.priv[0];
1755 BUG_ON(!test_rq);
1756
1757 test_rq->req_completed = 1;
1758 test_rq->req_result = err;
1759
1760 test_pr_info("%s: request %d completed, err=%d",
1761 __func__, test_rq->req_id, err);
1762
1763 mbtd->bkops_stage = BKOPS_STAGE_4;
1764 wake_up(&mbtd->bkops_wait_q);
1765}
1766
1767static void bkops_end_io_fn(struct request *rq, int err)
1768{
1769 struct test_request *test_rq =
1770 (struct test_request *)rq->elv.priv[0];
1771 BUG_ON(!test_rq);
1772
1773 test_rq->req_completed = 1;
1774 test_rq->req_result = err;
1775
1776 test_pr_info("%s: request %d completed, err=%d",
1777 __func__, test_rq->req_id, err);
1778 mbtd->bkops_stage = BKOPS_STAGE_2;
1779 wake_up(&mbtd->bkops_wait_q);
1780
1781}
1782
1783static int prepare_bkops(struct test_data *td)
1784{
1785 int ret = 0;
1786 struct request_queue *q = td->req_q;
1787 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1788 struct mmc_card *card = mq->card;
1789 struct mmc_bkops_stats *bkops_stat;
1790
1791 if (!card)
1792 return -EINVAL;
1793
1794 bkops_stat = &card->bkops_info.bkops_stats;
1795
1796 if (!card->ext_csd.bkops_en) {
1797 test_pr_err("%s: BKOPS is not enabled by card or host)",
1798 __func__);
1799 return -ENOTSUPP;
1800 }
1801 if (mmc_card_doing_bkops(card)) {
1802 test_pr_err("%s: BKOPS in progress, try later", __func__);
1803 return -EAGAIN;
1804 }
1805
1806 mmc_blk_init_bkops_statistics(card);
1807
1808 if ((mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2) ||
1809 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2_TWO_REQS) ||
1810 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_3))
1811 mq->err_check_fn = test_err_check;
1812 mbtd->err_check_counter = 0;
1813
1814 return ret;
1815}
1816
/*
 * run_bkops() - Execute the BKOPS test case selected in mbtd->test_info.
 * @td: test-iosched test data holding the request queue and test queue.
 *
 * Simulates card-side BKOPS (background operations) need by forcing
 * ext_csd.raw_bkops_status and bkops_info counters, then drives the
 * request queue and blocks on mbtd->bkops_wait_q until the end_io
 * callbacks advance mbtd->bkops_stage to the expected stage.
 *
 * Return: 0 on success, -EINVAL on a NULL card, a failed request
 * insertion, or an unknown test case.
 */
static int run_bkops(struct test_data *td)
{
	int ret = 0;
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		return -EINVAL;

	bkops_stat = &card->bkops_info.bkops_stats;

	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* Level-1 status: delayed-work BKOPS runs to completion with
		 * no request arriving to interrupt it. */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		/* exceed the threshold so the delayed work gets queued */
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		/* this long sleep makes sure the host starts bkops and
		   also, gets into suspend */
		msleep(10000);

		bkops_stat->ignore_card_bkops_status = false;
		card->ext_csd.raw_bkops_status = 0;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* Level-1 status, then a write request arrives after the
		 * delayed work fires and should interrupt BKOPS via HPI. */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		/* let the delayed BKOPS work get scheduled before queueing
		 * the interrupting write */
		msleep(card->bkops_info.delay_ms);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				      td->start_sector,
				      TEST_REQUEST_NUM_OF_BIOS,
				      TEST_PATTERN_5A,
				      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		/* dispatch the request just added (tail of the test queue) */
		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_CANCEL_DELAYED_WORK:
		/* A request arrives before the delayed work fires, which
		 * should cancel the pending delayed BKOPS work. */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);

		/* no sleep here: queue the write immediately so it races the
		 * delayed work */
		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
		/* Urgent status (level 2 or 3): BKOPS must start before the
		 * first request completes; a second request then verifies
		 * normal operation resumes. */
		bkops_stat->ignore_card_bkops_status = true;
		if (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2)
			card->ext_csd.raw_bkops_status = 2;
		else
			card->ext_csd.raw_bkops_status = 3;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);
		/* wait for the urgent BKOPS handling to kick in */
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* Urgent level 2 with two queued requests; packing is
		 * disabled so the requests are issued separately. */
		mq->wr_packing_enabled = false;
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 2;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				NULL);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		/* start dispatching from the HEAD of the queue (two
		 * requests are pending) */
		td->next_req = list_entry(td->test_queue.next,
					  struct test_request, queuelist);
		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		td->next_req = list_entry(td->test_queue.prev,
					  struct test_request, queuelist);
		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();

		break;
	default:
		test_pr_err("%s: wrong testcase: %d", __func__,
			    mbtd->test_info.testcase);
		ret = -EINVAL;
	}
	return ret;
}
2030
/* One-shot flag: set on open, cleared after a read handler emits its help
 * text once, so `cat` sees EOF on the second read. */
static bool message_repeat;

/*
 * test_open() - common debugfs open callback for all test nodes.
 * Stashes the inode private data and re-arms the one-shot help message.
 */
static int test_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	message_repeat = 1;
	return 0;
}
2038
2039/* send_packing TEST */
2040static ssize_t send_write_packing_test_write(struct file *file,
2041 const char __user *buf,
2042 size_t count,
2043 loff_t *ppos)
2044{
2045 int ret = 0;
2046 int i = 0;
2047 int number = -1;
2048 int j = 0;
2049
2050 test_pr_info("%s: -- send_write_packing TEST --", __func__);
2051
2052 sscanf(buf, "%d", &number);
2053
2054 if (number <= 0)
2055 number = 1;
2056
2057
2058 mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
2059
2060 if (validate_packed_commands_settings())
2061 return count;
2062
2063 if (mbtd->random_test_seed > 0)
2064 test_pr_info("%s: Test seed: %d", __func__,
2065 mbtd->random_test_seed);
2066
2067 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2068
2069 mbtd->test_info.data = mbtd;
2070 mbtd->test_info.prepare_test_fn = prepare_test;
2071 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2072 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2073 mbtd->test_info.post_test_fn = post_test;
2074
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002075 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002076 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2077 test_pr_info("%s: ====================", __func__);
2078
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002079 for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
2080 j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002081
2082 mbtd->test_info.testcase = j;
2083 mbtd->is_random = RANDOM_TEST;
2084 ret = test_iosched_start_test(&mbtd->test_info);
2085 if (ret)
2086 break;
2087 /* Allow FS requests to be dispatched */
2088 msleep(1000);
2089 mbtd->test_info.testcase = j;
2090 mbtd->is_random = NON_RANDOM_TEST;
2091 ret = test_iosched_start_test(&mbtd->test_info);
2092 if (ret)
2093 break;
2094 /* Allow FS requests to be dispatched */
2095 msleep(1000);
2096 }
2097 }
2098
2099 test_pr_info("%s: Completed all the test cases.", __func__);
2100
2101 return count;
2102}
2103
2104static ssize_t send_write_packing_test_read(struct file *file,
2105 char __user *buffer,
2106 size_t count,
2107 loff_t *offset)
2108{
2109 memset((void *)buffer, 0, count);
2110
2111 snprintf(buffer, count,
2112 "\nsend_write_packing_test\n"
2113 "=========\n"
2114 "Description:\n"
2115 "This test checks the following scenarios\n"
2116 "- Pack due to FLUSH message\n"
2117 "- Pack due to FLUSH after threshold writes\n"
2118 "- Pack due to READ message\n"
2119 "- Pack due to READ after threshold writes\n"
2120 "- Pack due to empty queue\n"
2121 "- Pack due to threshold writes\n"
2122 "- Pack due to one over threshold writes\n");
2123
2124 if (message_repeat == 1) {
2125 message_repeat = 0;
2126 return strnlen(buffer, count);
2127 } else {
2128 return 0;
2129 }
2130}
2131
/* debugfs file_operations for the "send_write_packing_test" node. */
const struct file_operations send_write_packing_test_ops = {
	.open = test_open,
	.write = send_write_packing_test_write,
	.read = send_write_packing_test_read,
};
2137
2138/* err_check TEST */
2139static ssize_t err_check_test_write(struct file *file,
2140 const char __user *buf,
2141 size_t count,
2142 loff_t *ppos)
2143{
2144 int ret = 0;
2145 int i = 0;
2146 int number = -1;
2147 int j = 0;
2148
2149 test_pr_info("%s: -- err_check TEST --", __func__);
2150
2151 sscanf(buf, "%d", &number);
2152
2153 if (number <= 0)
2154 number = 1;
2155
2156 mbtd->test_group = TEST_ERR_CHECK_GROUP;
2157
2158 if (validate_packed_commands_settings())
2159 return count;
2160
2161 if (mbtd->random_test_seed > 0)
2162 test_pr_info("%s: Test seed: %d", __func__,
2163 mbtd->random_test_seed);
2164
2165 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2166
2167 mbtd->test_info.data = mbtd;
2168 mbtd->test_info.prepare_test_fn = prepare_test;
2169 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2170 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2171 mbtd->test_info.post_test_fn = post_test;
2172
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002173 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002174 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2175 test_pr_info("%s: ====================", __func__);
2176
2177 for (j = ERR_CHECK_MIN_TESTCASE;
2178 j <= ERR_CHECK_MAX_TESTCASE ; j++) {
2179 mbtd->test_info.testcase = j;
2180 mbtd->is_random = RANDOM_TEST;
2181 ret = test_iosched_start_test(&mbtd->test_info);
2182 if (ret)
2183 break;
2184 /* Allow FS requests to be dispatched */
2185 msleep(1000);
2186 mbtd->test_info.testcase = j;
2187 mbtd->is_random = NON_RANDOM_TEST;
2188 ret = test_iosched_start_test(&mbtd->test_info);
2189 if (ret)
2190 break;
2191 /* Allow FS requests to be dispatched */
2192 msleep(1000);
2193 }
2194 }
2195
2196 test_pr_info("%s: Completed all the test cases.", __func__);
2197
2198 return count;
2199}
2200
2201static ssize_t err_check_test_read(struct file *file,
2202 char __user *buffer,
2203 size_t count,
2204 loff_t *offset)
2205{
2206 memset((void *)buffer, 0, count);
2207
2208 snprintf(buffer, count,
2209 "\nerr_check_TEST\n"
2210 "=========\n"
2211 "Description:\n"
2212 "This test checks the following scenarios\n"
2213 "- Return ABORT\n"
2214 "- Return PARTIAL followed by success\n"
2215 "- Return PARTIAL followed by abort\n"
2216 "- Return PARTIAL multiple times until success\n"
2217 "- Return PARTIAL with fail index = threshold\n"
2218 "- Return RETRY\n"
2219 "- Return CMD_ERR\n"
2220 "- Return DATA_ERR\n");
2221
2222 if (message_repeat == 1) {
2223 message_repeat = 0;
2224 return strnlen(buffer, count);
2225 } else {
2226 return 0;
2227 }
2228}
2229
/* debugfs file_operations for the "err_check_test" node. */
const struct file_operations err_check_test_ops = {
	.open = test_open,
	.write = err_check_test_write,
	.read = err_check_test_read,
};
2235
2236/* send_invalid_packed TEST */
2237static ssize_t send_invalid_packed_test_write(struct file *file,
2238 const char __user *buf,
2239 size_t count,
2240 loff_t *ppos)
2241{
2242 int ret = 0;
2243 int i = 0;
2244 int number = -1;
2245 int j = 0;
2246 int num_of_failures = 0;
2247
2248 test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
2249
2250 sscanf(buf, "%d", &number);
2251
2252 if (number <= 0)
2253 number = 1;
2254
2255 mbtd->test_group = TEST_SEND_INVALID_GROUP;
2256
2257 if (validate_packed_commands_settings())
2258 return count;
2259
2260 if (mbtd->random_test_seed > 0)
2261 test_pr_info("%s: Test seed: %d", __func__,
2262 mbtd->random_test_seed);
2263
2264 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2265
2266 mbtd->test_info.data = mbtd;
2267 mbtd->test_info.prepare_test_fn = prepare_test;
2268 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2269 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2270 mbtd->test_info.post_test_fn = post_test;
2271
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002272 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002273 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2274 test_pr_info("%s: ====================", __func__);
2275
2276 for (j = INVALID_CMD_MIN_TESTCASE;
2277 j <= INVALID_CMD_MAX_TESTCASE ; j++) {
2278
2279 mbtd->test_info.testcase = j;
2280 mbtd->is_random = RANDOM_TEST;
2281 ret = test_iosched_start_test(&mbtd->test_info);
2282 if (ret)
2283 num_of_failures++;
2284 /* Allow FS requests to be dispatched */
2285 msleep(1000);
2286
2287 mbtd->test_info.testcase = j;
2288 mbtd->is_random = NON_RANDOM_TEST;
2289 ret = test_iosched_start_test(&mbtd->test_info);
2290 if (ret)
2291 num_of_failures++;
2292 /* Allow FS requests to be dispatched */
2293 msleep(1000);
2294 }
2295 }
2296
2297 test_pr_info("%s: Completed all the test cases.", __func__);
2298
2299 if (num_of_failures > 0) {
2300 test_iosched_set_test_result(TEST_FAILED);
2301 test_pr_err(
2302 "There were %d failures during the test, TEST FAILED",
2303 num_of_failures);
2304 }
2305 return count;
2306}
2307
2308static ssize_t send_invalid_packed_test_read(struct file *file,
2309 char __user *buffer,
2310 size_t count,
2311 loff_t *offset)
2312{
2313 memset((void *)buffer, 0, count);
2314
2315 snprintf(buffer, count,
2316 "\nsend_invalid_packed_TEST\n"
2317 "=========\n"
2318 "Description:\n"
2319 "This test checks the following scenarios\n"
2320 "- Send an invalid header version\n"
2321 "- Send the wrong write code\n"
2322 "- Send an invalid R/W code\n"
2323 "- Send wrong start address in header\n"
2324 "- Send header with block_count smaller than actual\n"
2325 "- Send header with block_count larger than actual\n"
2326 "- Send header CMD23 packed bit set\n"
2327 "- Send CMD23 with block count over threshold\n"
2328 "- Send CMD23 with block_count equals zero\n"
2329 "- Send CMD23 packed bit unset\n"
2330 "- Send CMD23 reliable write bit set\n"
2331 "- Send CMD23 bits [16-29] set\n"
2332 "- Send CMD23 header block not in block_count\n");
2333
2334 if (message_repeat == 1) {
2335 message_repeat = 0;
2336 return strnlen(buffer, count);
2337 } else {
2338 return 0;
2339 }
2340}
2341
/* debugfs file_operations for the "send_invalid_packed_test" node. */
const struct file_operations send_invalid_packed_test_ops = {
	.open = test_open,
	.write = send_invalid_packed_test_write,
	.read = send_invalid_packed_test_read,
};
2347
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002348/* packing_control TEST */
2349static ssize_t write_packing_control_test_write(struct file *file,
2350 const char __user *buf,
2351 size_t count,
2352 loff_t *ppos)
2353{
2354 int ret = 0;
2355 int i = 0;
2356 int number = -1;
2357 int j = 0;
2358 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
2359 int max_num_requests = mq->card->ext_csd.max_packed_writes;
2360 int test_successful = 1;
2361
2362 test_pr_info("%s: -- write_packing_control TEST --", __func__);
2363
2364 sscanf(buf, "%d", &number);
2365
2366 if (number <= 0)
2367 number = 1;
2368
2369 test_pr_info("%s: max_num_requests = %d ", __func__,
2370 max_num_requests);
2371
2372 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2373 mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
2374
2375 if (validate_packed_commands_settings())
2376 return count;
2377
2378 mbtd->test_info.data = mbtd;
2379 mbtd->test_info.prepare_test_fn = prepare_test;
2380 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2381 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2382
2383 for (i = 0; i < number; ++i) {
2384 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2385 test_pr_info("%s: ====================", __func__);
2386
2387 for (j = PACKING_CONTROL_MIN_TESTCASE;
2388 j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
2389
2390 test_successful = 1;
2391 mbtd->test_info.testcase = j;
2392 mbtd->is_random = RANDOM_TEST;
2393 ret = test_iosched_start_test(&mbtd->test_info);
2394 if (ret) {
2395 test_successful = 0;
2396 break;
2397 }
2398 /* Allow FS requests to be dispatched */
2399 msleep(1000);
2400
2401 mbtd->test_info.testcase = j;
2402 mbtd->is_random = NON_RANDOM_TEST;
2403 ret = test_iosched_start_test(&mbtd->test_info);
2404 if (ret) {
2405 test_successful = 0;
2406 break;
2407 }
2408 /* Allow FS requests to be dispatched */
2409 msleep(1000);
2410 }
2411
2412 if (!test_successful)
2413 break;
2414 }
2415
2416 test_pr_info("%s: Completed all the test cases.", __func__);
2417
2418 return count;
2419}
2420
2421static ssize_t write_packing_control_test_read(struct file *file,
2422 char __user *buffer,
2423 size_t count,
2424 loff_t *offset)
2425{
2426 memset((void *)buffer, 0, count);
2427
2428 snprintf(buffer, count,
2429 "\nwrite_packing_control_test\n"
2430 "=========\n"
2431 "Description:\n"
2432 "This test checks the following scenarios\n"
2433 "- Packing expected - one over trigger\n"
2434 "- Packing expected - N over trigger\n"
2435 "- Packing expected - N over trigger followed by read\n"
2436 "- Packing expected - N over trigger followed by flush\n"
2437 "- Packing expected - threshold over trigger FB by flush\n"
2438 "- Packing not expected - less than trigger\n"
2439 "- Packing not expected - trigger requests\n"
2440 "- Packing not expected - trigger, read, trigger\n"
2441 "- Mixed state - packing -> no packing -> packing\n"
2442 "- Mixed state - no packing -> packing -> no packing\n");
2443
2444 if (message_repeat == 1) {
2445 message_repeat = 0;
2446 return strnlen(buffer, count);
2447 } else {
2448 return 0;
2449 }
2450}
2451
/* debugfs file_operations for the "packing_control_test" node. */
const struct file_operations write_packing_control_test_ops = {
	.open = test_open,
	.write = write_packing_control_test_write,
	.read = write_packing_control_test_read,
};
2457
Maya Erezddc55732012-10-17 09:51:01 +02002458static ssize_t write_discard_sanitize_test_write(struct file *file,
2459 const char __user *buf,
2460 size_t count,
2461 loff_t *ppos)
2462{
2463 int ret = 0;
2464 int i = 0;
2465 int number = -1;
2466
2467 sscanf(buf, "%d", &number);
2468 if (number <= 0)
2469 number = 1;
2470
2471 test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);
2472
2473 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2474
2475 mbtd->test_group = TEST_GENERAL_GROUP;
2476
2477 mbtd->test_info.data = mbtd;
2478 mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
2479 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2480 mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;
2481
2482 for (i = 0 ; i < number ; ++i) {
2483 test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
2484 test_pr_info("%s: ===================", __func__);
2485
2486 mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
2487 ret = test_iosched_start_test(&mbtd->test_info);
2488
2489 if (ret)
2490 break;
2491 }
2492
2493 return count;
2494}
2495
/* debugfs file_operations for the "write_discard_sanitize_test" node
 * (write-only: no read/help handler). */
const struct file_operations write_discard_sanitize_test_ops = {
	.open = test_open,
	.write = write_discard_sanitize_test_write,
};
2500
Yaniv Gardie9214c82012-10-18 13:58:18 +02002501static ssize_t bkops_test_write(struct file *file,
2502 const char __user *buf,
2503 size_t count,
2504 loff_t *ppos)
2505{
2506 int ret = 0;
2507 int i = 0, j;
2508 int number = -1;
2509
2510 test_pr_info("%s: -- bkops_test TEST --", __func__);
2511
2512 sscanf(buf, "%d", &number);
2513
2514 if (number <= 0)
2515 number = 1;
2516
2517 mbtd->test_group = TEST_BKOPS_GROUP;
2518
2519 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2520
2521 mbtd->test_info.data = mbtd;
2522 mbtd->test_info.prepare_test_fn = prepare_bkops;
2523 mbtd->test_info.check_test_result_fn = check_bkops_result;
2524 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2525 mbtd->test_info.run_test_fn = run_bkops;
2526 mbtd->test_info.timeout_msec = BKOPS_TEST_TIMEOUT;
2527 mbtd->test_info.post_test_fn = bkops_post_test;
2528
2529 for (i = 0 ; i < number ; ++i) {
2530 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2531 test_pr_info("%s: ===================", __func__);
2532 for (j = BKOPS_MIN_TESTCASE ;
2533 j <= BKOPS_MAX_TESTCASE ; j++) {
2534 mbtd->test_info.testcase = j;
2535 ret = test_iosched_start_test(&mbtd->test_info);
2536 if (ret)
2537 break;
2538 }
2539 }
2540
2541 test_pr_info("%s: Completed all the test cases.", __func__);
2542
2543 return count;
2544}
2545
2546static ssize_t bkops_test_read(struct file *file,
2547 char __user *buffer,
2548 size_t count,
2549 loff_t *offset)
2550{
2551 memset((void *)buffer, 0, count);
2552
2553 snprintf(buffer, count,
2554 "\nbkops_test\n========================\n"
2555 "Description:\n"
2556 "This test simulates BKOPS status from card\n"
2557 "and verifies that:\n"
2558 " - Starting BKOPS delayed work, level 1\n"
2559 " - Starting BKOPS delayed work, level 1, with HPI\n"
2560 " - Cancel starting BKOPS delayed work, "
2561 " when a request is received\n"
2562 " - Starting BKOPS urgent, level 2,3\n"
2563 " - Starting BKOPS urgent with 2 requests\n");
2564 return strnlen(buffer, count);
2565}
2566
/* debugfs file_operations for the "bkops_test" node. */
const struct file_operations bkops_test_ops = {
	.open = test_open,
	.write = bkops_test_write,
	.read = bkops_test_read,
};
2572
Lee Susmanf18263a2012-10-24 14:14:37 +02002573static ssize_t long_sequential_read_test_write(struct file *file,
2574 const char __user *buf,
2575 size_t count,
2576 loff_t *ppos)
2577{
2578 int ret = 0;
2579 int i = 0;
2580 int number = -1;
2581 unsigned int mtime, integer, fraction;
2582
2583 test_pr_info("%s: -- Long Sequential Read TEST --", __func__);
2584
2585 sscanf(buf, "%d", &number);
2586
2587 if (number <= 0)
2588 number = 1;
2589
2590 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2591 mbtd->test_group = TEST_GENERAL_GROUP;
2592
2593 mbtd->test_info.data = mbtd;
2594 mbtd->test_info.prepare_test_fn = prepare_test;
2595 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2596
2597 for (i = 0 ; i < number ; ++i) {
2598 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2599 test_pr_info("%s: ====================", __func__);
2600
2601 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_READ;
2602 mbtd->is_random = NON_RANDOM_TEST;
2603 ret = test_iosched_start_test(&mbtd->test_info);
2604 if (ret)
2605 break;
2606
2607 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2608
2609 test_pr_info("%s: time is %u msec, size is %u.%u MiB",
2610 __func__, mtime, LONG_TEST_SIZE_INTEGER,
2611 LONG_TEST_SIZE_FRACTION);
2612
2613 /* we first multiply in order not to lose precision */
2614 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2615 /* divide values to get a MiB/sec integer value with one
2616 digit of precision. Multiply by 10 for one digit precision
2617 */
2618 fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
2619 integer /= 10;
2620 /* and calculate the MiB value fraction */
2621 fraction -= integer * 10;
2622
2623 test_pr_info("%s: Throughput: %u.%u MiB/sec\n"
2624 , __func__, integer, fraction);
2625
2626 /* Allow FS requests to be dispatched */
2627 msleep(1000);
2628 }
2629
2630 return count;
2631}
2632
2633static ssize_t long_sequential_read_test_read(struct file *file,
2634 char __user *buffer,
2635 size_t count,
2636 loff_t *offset)
2637{
2638 memset((void *)buffer, 0, count);
2639
2640 snprintf(buffer, count,
2641 "\nlong_sequential_read_test\n"
2642 "=========\n"
2643 "Description:\n"
2644 "This test runs the following scenarios\n"
2645 "- Long Sequential Read Test: this test measures read "
2646 "throughput at the driver level by sequentially reading many "
2647 "large requests.\n");
2648
2649 if (message_repeat == 1) {
2650 message_repeat = 0;
2651 return strnlen(buffer, count);
2652 } else
2653 return 0;
2654}
2655
/* debugfs file_operations for the "long_sequential_read_test" node. */
const struct file_operations long_sequential_read_test_ops = {
	.open = test_open,
	.write = long_sequential_read_test_write,
	.read = long_sequential_read_test_read,
};
2661
Lee Susmana35ae6e2012-10-25 16:06:07 +02002662static ssize_t long_sequential_write_test_write(struct file *file,
2663 const char __user *buf,
2664 size_t count,
2665 loff_t *ppos)
2666{
2667 int ret = 0;
2668 int i = 0;
2669 int number = -1;
2670 unsigned int mtime, integer, fraction;
2671
2672 test_pr_info("%s: -- Long Sequential Write TEST --", __func__);
2673
2674 sscanf(buf, "%d", &number);
2675
2676 if (number <= 0)
2677 number = 1;
2678
2679 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2680 mbtd->test_group = TEST_GENERAL_GROUP;
2681
2682 mbtd->test_info.data = mbtd;
2683 mbtd->test_info.prepare_test_fn = prepare_test;
2684 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2685
2686 for (i = 0 ; i < number ; ++i) {
2687 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2688 test_pr_info("%s: ====================", __func__);
2689
2690 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_WRITE;
2691 mbtd->is_random = NON_RANDOM_TEST;
2692 ret = test_iosched_start_test(&mbtd->test_info);
2693 if (ret)
2694 break;
2695
2696 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2697
2698 test_pr_info("%s: time is %u msec, size is %u.%u MiB",
2699 __func__, mtime, LONG_TEST_SIZE_INTEGER,
2700 LONG_TEST_SIZE_FRACTION);
2701
2702 /* we first multiply in order not to lose precision */
2703 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2704 /* divide values to get a MiB/sec integer value with one
2705 digit of precision
2706 */
2707 fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
2708 integer /= 10;
2709 /* and calculate the MiB value fraction */
2710 fraction -= integer * 10;
2711
2712 test_pr_info("%s: Throughput: %u.%u MiB/sec\n",
2713 __func__, integer, fraction);
2714
2715 /* Allow FS requests to be dispatched */
2716 msleep(1000);
2717 }
2718
2719 return count;
2720}
2721
2722static ssize_t long_sequential_write_test_read(struct file *file,
2723 char __user *buffer,
2724 size_t count,
2725 loff_t *offset)
2726{
2727 memset((void *)buffer, 0, count);
2728
2729 snprintf(buffer, count,
2730 "\nlong_sequential_write_test\n"
2731 "=========\n"
2732 "Description:\n"
2733 "This test runs the following scenarios\n"
2734 "- Long Sequential Write Test: this test measures write "
2735 "throughput at the driver level by sequentially writing many "
2736 "large requests\n");
2737
2738 if (message_repeat == 1) {
2739 message_repeat = 0;
2740 return strnlen(buffer, count);
2741 } else
2742 return 0;
2743}
2744
/* debugfs file_operations for the "long_sequential_write_test" node. */
const struct file_operations long_sequential_write_test_ops = {
	.open = test_open,
	.write = long_sequential_write_test_write,
	.read = long_sequential_write_test_read,
};
2750
2751
/*
 * mmc_block_test_debugfs_cleanup() - remove all debugfs entries created by
 * mmc_block_test_debugfs_init(). debugfs_remove() ignores NULL, so this is
 * safe to call on a partially initialized set of entries.
 */
static void mmc_block_test_debugfs_cleanup(void)
{
	debugfs_remove(mbtd->debug.random_test_seed);
	debugfs_remove(mbtd->debug.send_write_packing_test);
	debugfs_remove(mbtd->debug.err_check_test);
	debugfs_remove(mbtd->debug.send_invalid_packed_test);
	debugfs_remove(mbtd->debug.packing_control_test);
	debugfs_remove(mbtd->debug.discard_sanitize_test);
	debugfs_remove(mbtd->debug.bkops_test);
	debugfs_remove(mbtd->debug.long_sequential_read_test);
	debugfs_remove(mbtd->debug.long_sequential_write_test);
}
2764
2765static int mmc_block_test_debugfs_init(void)
2766{
2767 struct dentry *utils_root, *tests_root;
2768
2769 utils_root = test_iosched_get_debugfs_utils_root();
2770 tests_root = test_iosched_get_debugfs_tests_root();
2771
2772 if (!utils_root || !tests_root)
2773 return -EINVAL;
2774
2775 mbtd->debug.random_test_seed = debugfs_create_u32(
2776 "random_test_seed",
2777 S_IRUGO | S_IWUGO,
2778 utils_root,
2779 &mbtd->random_test_seed);
2780
2781 if (!mbtd->debug.random_test_seed)
2782 goto err_nomem;
2783
2784 mbtd->debug.send_write_packing_test =
2785 debugfs_create_file("send_write_packing_test",
2786 S_IRUGO | S_IWUGO,
2787 tests_root,
2788 NULL,
2789 &send_write_packing_test_ops);
2790
2791 if (!mbtd->debug.send_write_packing_test)
2792 goto err_nomem;
2793
2794 mbtd->debug.err_check_test =
2795 debugfs_create_file("err_check_test",
2796 S_IRUGO | S_IWUGO,
2797 tests_root,
2798 NULL,
2799 &err_check_test_ops);
2800
2801 if (!mbtd->debug.err_check_test)
2802 goto err_nomem;
2803
2804 mbtd->debug.send_invalid_packed_test =
2805 debugfs_create_file("send_invalid_packed_test",
2806 S_IRUGO | S_IWUGO,
2807 tests_root,
2808 NULL,
2809 &send_invalid_packed_test_ops);
2810
2811 if (!mbtd->debug.send_invalid_packed_test)
2812 goto err_nomem;
2813
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002814 mbtd->debug.packing_control_test = debugfs_create_file(
2815 "packing_control_test",
2816 S_IRUGO | S_IWUGO,
2817 tests_root,
2818 NULL,
2819 &write_packing_control_test_ops);
2820
2821 if (!mbtd->debug.packing_control_test)
2822 goto err_nomem;
2823
Maya Erezddc55732012-10-17 09:51:01 +02002824 mbtd->debug.discard_sanitize_test =
2825 debugfs_create_file("write_discard_sanitize_test",
2826 S_IRUGO | S_IWUGO,
2827 tests_root,
2828 NULL,
2829 &write_discard_sanitize_test_ops);
2830 if (!mbtd->debug.discard_sanitize_test) {
2831 mmc_block_test_debugfs_cleanup();
2832 return -ENOMEM;
2833 }
2834
Yaniv Gardie9214c82012-10-18 13:58:18 +02002835 mbtd->debug.bkops_test =
2836 debugfs_create_file("bkops_test",
2837 S_IRUGO | S_IWUGO,
2838 tests_root,
2839 NULL,
2840 &bkops_test_ops);
2841
2842 if (!mbtd->debug.bkops_test)
2843 goto err_nomem;
2844
Lee Susmanf18263a2012-10-24 14:14:37 +02002845 mbtd->debug.long_sequential_read_test = debugfs_create_file(
2846 "long_sequential_read_test",
2847 S_IRUGO | S_IWUGO,
2848 tests_root,
2849 NULL,
2850 &long_sequential_read_test_ops);
2851
2852 if (!mbtd->debug.long_sequential_read_test)
2853 goto err_nomem;
2854
Lee Susmana35ae6e2012-10-25 16:06:07 +02002855 mbtd->debug.long_sequential_write_test = debugfs_create_file(
2856 "long_sequential_write_test",
2857 S_IRUGO | S_IWUGO,
2858 tests_root,
2859 NULL,
2860 &long_sequential_write_test_ops);
2861
2862 if (!mbtd->debug.long_sequential_write_test)
2863 goto err_nomem;
2864
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002865 return 0;
2866
2867err_nomem:
2868 mmc_block_test_debugfs_cleanup();
2869 return -ENOMEM;
2870}
2871
2872static void mmc_block_test_probe(void)
2873{
2874 struct request_queue *q = test_iosched_get_req_queue();
2875 struct mmc_queue *mq;
2876 int max_packed_reqs;
2877
2878 if (!q) {
2879 test_pr_err("%s: NULL request queue", __func__);
2880 return;
2881 }
2882
2883 mq = q->queuedata;
2884 if (!mq) {
2885 test_pr_err("%s: NULL mq", __func__);
2886 return;
2887 }
2888
2889 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
2890 mbtd->exp_packed_stats.packing_events =
2891 kzalloc((max_packed_reqs + 1) *
2892 sizeof(*mbtd->exp_packed_stats.packing_events),
2893 GFP_KERNEL);
2894
2895 mmc_block_test_debugfs_init();
2896}
2897
/* test-iosched exit callback: tear down the debugfs entries. */
static void mmc_block_test_remove(void)
{
	mmc_block_test_debugfs_cleanup();
}
2902
2903static int __init mmc_block_test_init(void)
2904{
2905 mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
2906 if (!mbtd) {
2907 test_pr_err("%s: failed to allocate mmc_block_test_data",
2908 __func__);
2909 return -ENODEV;
2910 }
2911
Yaniv Gardie9214c82012-10-18 13:58:18 +02002912 init_waitqueue_head(&mbtd->bkops_wait_q);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002913 mbtd->bdt.init_fn = mmc_block_test_probe;
2914 mbtd->bdt.exit_fn = mmc_block_test_remove;
2915 INIT_LIST_HEAD(&mbtd->bdt.list);
2916 test_iosched_register(&mbtd->bdt);
2917
2918 return 0;
2919}
2920
/* Module exit: unregister from test-iosched and free the global data. */
static void __exit mmc_block_test_exit(void)
{
	test_iosched_unregister(&mbtd->bdt);
	kfree(mbtd);
}
2926
/* Module registration and metadata. */
module_init(mmc_block_test_init);
module_exit(mmc_block_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MMC block test");