blob: 08c75a0e048f51e56ea5baa56c486cf40de0d9c8 [file] [log] [blame]
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/* MMC block test */
15
16#include <linux/module.h>
17#include <linux/blkdev.h>
18#include <linux/debugfs.h>
19#include <linux/mmc/card.h>
20#include <linux/mmc/host.h>
21#include <linux/delay.h>
22#include <linux/test-iosched.h>
Lee Susmanf18263a2012-10-24 14:14:37 +020023#include <linux/jiffies.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020024#include "queue.h"
Yaniv Gardie9214c82012-10-18 13:58:18 +020025#include <linux/mmc/mmc.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020026
27#define MODULE_NAME "mmc_block_test"
28#define TEST_MAX_SECTOR_RANGE (600*1024*1024) /* 600 MB */
29#define TEST_MAX_BIOS_PER_REQ 120
30#define CMD23_PACKED_BIT (1 << 30)
31#define LARGE_PRIME_1 1103515367
32#define LARGE_PRIME_2 35757
33#define PACKED_HDR_VER_MASK 0x000000FF
34#define PACKED_HDR_RW_MASK 0x0000FF00
35#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
36#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
Maya Erezddc55732012-10-17 09:51:01 +020037#define SECTOR_SIZE 512
38#define NUM_OF_SECTORS_PER_BIO ((BIO_U32_SIZE * 4) / SECTOR_SIZE)
39#define BIO_TO_SECTOR(x) (x * NUM_OF_SECTORS_PER_BIO)
Lee Susmanf18263a2012-10-24 14:14:37 +020040/* the desired long test size to be written or read */
41#define LONG_TEST_MAX_NUM_BYTES (50*1024*1024) /* 50MB */
42/* request queue limitation is 128 requests, and we leave 10 spare requests */
43#define TEST_MAX_REQUESTS 118
44#define LONG_TEST_MAX_NUM_REQS (LONG_TEST_MAX_NUM_BYTES / \
45 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
46/* this doesn't allow the test requests num to be greater than the maximum */
47#define LONG_TEST_ACTUAL_NUM_REQS \
48 ((TEST_MAX_REQUESTS < LONG_TEST_MAX_NUM_REQS) ? \
49 TEST_MAX_REQUESTS : LONG_TEST_MAX_NUM_REQS)
50#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
51/* actual number of bytes in test */
52#define LONG_TEST_ACTUAL_BYTE_NUM (LONG_TEST_ACTUAL_NUM_REQS * \
53 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
54/* actual number of MiB in test multiplied by 10, for single digit precision*/
55#define LONG_TEST_ACTUAL_MB_NUM_X_10 ((LONG_TEST_ACTUAL_BYTE_NUM * 10) / \
56 (1024 * 1024))
57/* extract integer value */
58#define LONG_TEST_SIZE_INTEGER (LONG_TEST_ACTUAL_MB_NUM_X_10 / 10)
59/* and calculate the MiB value fraction */
60#define LONG_TEST_SIZE_FRACTION (LONG_TEST_ACTUAL_MB_NUM_X_10 - \
61 (LONG_TEST_SIZE_INTEGER * 10))
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020062
63#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
64#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
65#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
66
Maya Erezddc55732012-10-17 09:51:01 +020067#define SANITIZE_TEST_TIMEOUT 240000
Lee Susmanb09c0412012-12-19 14:28:52 +020068#define NEW_REQ_TEST_SLEEP_TIME 1
69#define NEW_REQ_TEST_NUM_BIOS 64
Yaniv Gardie9214c82012-10-18 13:58:18 +020070#define TEST_REQUEST_NUM_OF_BIOS 3
71
Yaniv Gardie9214c82012-10-18 13:58:18 +020072#define CHECK_BKOPS_STATS(stats, exp_bkops, exp_hpi, exp_suspend) \
73 ((stats.bkops != exp_bkops) || \
74 (stats.hpi != exp_hpi) || \
75 (stats.suspend != exp_suspend))
76#define BKOPS_TEST_TIMEOUT 60000
Maya Erezddc55732012-10-17 09:51:01 +020077
/*
 * Whether a testcase issues its requests with random or fixed counts of
 * bios-per-request / requests-per-test (see mmc_block_test_data.is_random).
 */
enum is_random {
	NON_RANDOM_TEST,
	RANDOM_TEST,
};
82
/*
 * All testcases implemented by this module, partitioned into groups.
 * Each group is bracketed by *_MIN_TESTCASE / *_MAX_TESTCASE aliases so
 * that range checks of the form (min <= tc && tc <= max) can identify
 * the group a testcase belongs to.
 */
enum mmc_block_test_testcases {
	/* Start of send write packing test group */
	SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_FLUSH,
	TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_EMPTY_QUEUE,
	TEST_STOP_DUE_TO_MAX_REQ_NUM,
	TEST_STOP_DUE_TO_THRESHOLD,
	SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,

	/* Start of err check test group (emulated card error returns) */
	ERR_CHECK_MIN_TESTCASE,
	TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
	TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
	TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
	TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
	TEST_RET_PARTIAL_MAX_FAIL_IDX,
	TEST_RET_RETRY,
	TEST_RET_CMD_ERR,
	TEST_RET_DATA_ERR,
	ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,

	/* Start of send invalid test group (corrupted packed header/CMD23) */
	INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_WRONG_WRITE_CODE,
	TEST_HDR_INVALID_RW_CODE,
	TEST_HDR_DIFFERENT_ADDRESSES,
	TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
	TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
	TEST_HDR_CMD23_PACKED_BIT_SET,
	TEST_CMD23_MAX_PACKED_WRITES,
	TEST_CMD23_ZERO_PACKED_WRITES,
	TEST_CMD23_PACKED_BIT_UNSET,
	TEST_CMD23_REL_WR_BIT_SET,
	TEST_CMD23_BITS_16TO29_SET,
	TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
	INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,

	/*
	 * Start of packing control test group.
	 * in these next testcases the abbreviation FB = followed by
	 */
	PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
				PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_N_OVER_TRIGGER,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
	TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
	TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
	TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
	TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
	TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
	PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,

	/* Stand-alone testcase, not part of a bracketed group */
	TEST_WRITE_DISCARD_SANITIZE_READ,

	/* Start of bkops test group */
	BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1 = BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1_HPI,
	BKOPS_CANCEL_DELAYED_WORK,
	BKOPS_URGENT_LEVEL_2,
	BKOPS_URGENT_LEVEL_2_TWO_REQS,
	BKOPS_URGENT_LEVEL_3,
	BKOPS_MAX_TESTCASE = BKOPS_URGENT_LEVEL_3,

	/* Long sequential throughput testcases */
	TEST_LONG_SEQUENTIAL_READ,
	TEST_LONG_SEQUENTIAL_WRITE,

	/* New-request notification testcase */
	TEST_NEW_REQ_NOTIFICATION,
};
160
/*
 * Test group identifiers; mmc_block_test_data.test_group records which
 * group the currently-running testcase belongs to.
 */
enum mmc_block_test_group {
	TEST_NO_GROUP,
	TEST_GENERAL_GROUP,
	TEST_SEND_WRITE_PACKING_GROUP,
	TEST_ERR_CHECK_GROUP,
	TEST_SEND_INVALID_GROUP,
	TEST_PACKING_CONTROL_GROUP,
	TEST_BKOPS_GROUP,
	TEST_NEW_NOTIFICATION_GROUP,
};
171
/*
 * Progress markers for the BKOPS testcases; the current stage is kept in
 * mmc_block_test_data.bkops_stage and waited on via bkops_wait_q.
 */
enum bkops_test_stages {
	BKOPS_STAGE_1,
	BKOPS_STAGE_2,
	BKOPS_STAGE_3,
	BKOPS_STAGE_4,
};
178
/*
 * debugfs dentries exposed by this module; each entry triggers (or
 * configures, in the case of random_test_seed) one family of testcases.
 */
struct mmc_block_test_debug {
	struct dentry *send_write_packing_test;
	struct dentry *err_check_test;
	struct dentry *send_invalid_packed_test;
	struct dentry *random_test_seed;
	struct dentry *packing_control_test;
	struct dentry *discard_sanitize_test;
	struct dentry *bkops_test;
	struct dentry *long_sequential_read_test;
	struct dentry *long_sequential_write_test;
	struct dentry *new_req_notification_test;
};
191
/*
 * Global state for the mmc block test module; a single instance is
 * allocated and referenced through the file-scope pointer mbtd.
 */
struct mmc_block_test_data {
	/* The number of write requests that the test will issue */
	int num_requests;
	/* The expected write packing statistics for the current test */
	struct mmc_wr_pack_stats exp_packed_stats;
	/*
	 * A user-defined seed for random choices of number of bios written in
	 * a request, and of number of requests issued in a test
	 * This field is randomly updated after each use
	 */
	unsigned int random_test_seed;
	/* A retry counter used in err_check tests */
	int err_check_counter;
	/* Can be one of the values of enum test_group */
	enum mmc_block_test_group test_group;
	/*
	 * Indicates if the current testcase is running with random values of
	 * num_requests and num_bios (in each request)
	 */
	int is_random;
	/* Data structure for debugfs dentrys */
	struct mmc_block_test_debug debug;
	/*
	 * Data structure containing individual test information, including
	 * self-defined specific data
	 */
	struct test_info test_info;
	/* mmc block device test */
	struct blk_dev_test_type bdt;
	/* Current BKOPs test stage */
	enum bkops_test_stages bkops_stage;
	/* A wait queue for BKOPs tests */
	wait_queue_head_t bkops_wait_q;
	/* Requests completed so far in the new-request notification test */
	unsigned int completed_req_count;
};
228
229static struct mmc_block_test_data *mbtd;
230
Lee Susmane868f8a2012-11-04 15:04:41 +0200231void print_mmc_packing_stats(struct mmc_card *card)
232{
233 int i;
234 int max_num_of_packed_reqs = 0;
235
236 if ((!card) || (!card->wr_pack_stats.packing_events))
237 return;
238
239 max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
240
241 spin_lock(&card->wr_pack_stats.lock);
242
243 pr_info("%s: write packing statistics:\n",
244 mmc_hostname(card->host));
245
246 for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
247 if (card->wr_pack_stats.packing_events[i] != 0)
248 pr_info("%s: Packed %d reqs - %d times\n",
249 mmc_hostname(card->host), i,
250 card->wr_pack_stats.packing_events[i]);
251 }
252
253 pr_info("%s: stopped packing due to the following reasons:\n",
254 mmc_hostname(card->host));
255
256 if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
257 pr_info("%s: %d times: exceedmax num of segments\n",
258 mmc_hostname(card->host),
259 card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
260 if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
261 pr_info("%s: %d times: exceeding the max num of sectors\n",
262 mmc_hostname(card->host),
263 card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
264 if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
265 pr_info("%s: %d times: wrong data direction\n",
266 mmc_hostname(card->host),
267 card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
268 if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
269 pr_info("%s: %d times: flush or discard\n",
270 mmc_hostname(card->host),
271 card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
272 if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
273 pr_info("%s: %d times: empty queue\n",
274 mmc_hostname(card->host),
275 card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
276 if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
277 pr_info("%s: %d times: rel write\n",
278 mmc_hostname(card->host),
279 card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
280 if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
281 pr_info("%s: %d times: Threshold\n",
282 mmc_hostname(card->host),
283 card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
284
285 spin_unlock(&card->wr_pack_stats.lock);
286}
287
Lee Susman66842b02012-12-19 14:28:03 +0200288/**
289 * mmc_print_async_event_stats() - Print async event statistics
290 * @card: The mmc_card in which the async_event_stats
291 * struct is a member
292 */
293void mmc_print_async_event_stats(struct mmc_card *card)
294{
295 struct mmc_async_event_stats *s;
296
297 if (!card)
298 return;
299
300 s = &card->async_event_stats;
301 if (!s)
302 return;
303
304 pr_info("%s: new notification & req statistics:\n",
305 mmc_hostname(card->host));
306 pr_info("%s: done_flag:%d", mmc_hostname(card->host),
307 s->done_flag);
308 pr_info("%s: cmd_retry:%d", mmc_hostname(card->host),
309 s->cmd_retry);
310 pr_info("%s: NULL fetched:%d", mmc_hostname(card->host),
311 s->null_fetched);
312 pr_info("%s: wake up new:%d", mmc_hostname(card->host),
313 s->wakeup_new);
314 pr_info("%s: new_request_flag:%d", mmc_hostname(card->host),
315 s->new_request_flag);
316 pr_info("%s: no waiting:%d\n", mmc_hostname(card->host),
317 s->q_no_waiting);
318 pr_info("%s: no_mmc_request_action:%d", mmc_hostname(card->host),
319 s->no_mmc_request_action);
320 pr_info("%s: wakeup_mq_thread:%d", mmc_hostname(card->host),
321 s->wakeup_mq_thread);
322 pr_info("%s: fetch_due_to_new_req:%d", mmc_hostname(card->host),
323 s->fetch_due_to_new_req);
324 pr_info("%s: returned_new_req:%d", mmc_hostname(card->host),
325 s->returned_new_req);
326 pr_info("%s: done_when_new_req_event_on:%d", mmc_hostname(card->host),
327 s->done_when_new_req_event_on);
328 pr_info("%s: new_req_when_new_marked:%d", mmc_hostname(card->host),
329 s->new_req_when_new_marked);
330}
331
/*
 * A callback assigned to the packed_test_fn field.
 * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
 * Here we alter the packed header or CMD23 in order to send an invalid
 * packed command to the card, according to mbtd->test_info.testcase.
 * The header layout assumed here: word 0 holds version (byte 0), R/W code
 * (byte 1) and number of entries (byte 2); subsequent words hold the
 * per-request CMD23/CMD25 argument pairs.
 */
static void test_invalid_packed_cmd(struct request_queue *q,
				struct mmc_queue_req *mqrq)
{
	struct mmc_queue *mq = q->queuedata;
	u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
	struct request *req = mqrq->req;
	struct request *second_rq;
	struct test_request *test_rq;
	struct mmc_blk_request *brq = &mqrq->brq;
	int num_requests;
	int max_packed_reqs;

	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return;
	}

	/* test-iosched keeps its per-request data in elv.priv[0] */
	test_rq = (struct test_request *)req->elv.priv[0];
	if (!test_rq) {
		test_pr_err("%s: NULL test_rq", __func__);
		return;
	}
	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	switch (mbtd->test_info.testcase) {
	case TEST_HDR_INVALID_VERSION:
		test_pr_info("%s: set invalid header version", __func__);
		/* Put 0 in header version field (1 byte, offset 0 in header) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
		break;
	case TEST_HDR_WRONG_WRITE_CODE:
		test_pr_info("%s: wrong write code", __func__);
		/* Set R/W field with R value (1 byte, offset 1 in header) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
		break;
	case TEST_HDR_INVALID_RW_CODE:
		test_pr_info("%s: invalid r/w code", __func__);
		/* Set R/W field with invalid value (neither read nor write) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
		break;
	case TEST_HDR_DIFFERENT_ADDRESSES:
		test_pr_info("%s: different addresses", __func__);
		second_rq = list_entry(req->queuelist.next, struct request,
				queuelist);
		test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
			      __func__, (long)req->__sector,
			     (long)second_rq->__sector);
		/*
		 * Put start sector of second write request in the first write
		 * request's cmd25 argument in the packed header
		 */
		packed_cmd_hdr[3] = second_rq->__sector;
		break;
	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
		test_pr_info("%s: request num smaller than actual" , __func__);
		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
									>> 16;
		/* num of entries is decremented by 1 */
		num_requests = (num_requests - 1) << 16;
		/*
		 * Set number of requests field in packed write header to be
		 * smaller than the actual number (1 byte, offset 2 in header)
		 */
		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
		break;
	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
		test_pr_info("%s: request num larger than actual" , __func__);
		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
									>> 16;
		/* num of entries is incremented by 1 */
		num_requests = (num_requests + 1) << 16;
		/*
		 * Set number of requests field in packed write header to be
		 * larger than the actual number (1 byte, offset 2 in header).
		 */
		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
		break;
	case TEST_HDR_CMD23_PACKED_BIT_SET:
		test_pr_info("%s: header CMD23 packed bit set" , __func__);
		/*
		 * Set packed bit (bit 30) in cmd23 argument of first and second
		 * write requests in packed write header.
		 * These are located at bytes 2 and 4 in packed write header
		 */
		packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
		packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
		break;
	case TEST_CMD23_MAX_PACKED_WRITES:
		test_pr_info("%s: CMD23 request num > max_packed_reqs",
			      __func__);
		/*
		 * Set the individual packed cmd23 request num to
		 * max_packed_reqs + 1
		 */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
		break;
	case TEST_CMD23_ZERO_PACKED_WRITES:
		test_pr_info("%s: CMD23 request num = 0", __func__);
		/* Set the individual packed cmd23 request num to zero */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED;
		break;
	case TEST_CMD23_PACKED_BIT_UNSET:
		test_pr_info("%s: CMD23 packed bit unset", __func__);
		/*
		 * Set the individual packed cmd23 packed bit to 0,
		 * although there is a packed write request
		 */
		brq->sbc.arg &= ~CMD23_PACKED_BIT;
		break;
	case TEST_CMD23_REL_WR_BIT_SET:
		test_pr_info("%s: CMD23 REL WR bit set", __func__);
		/* Set the individual packed cmd23 reliable write bit */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
		break;
	case TEST_CMD23_BITS_16TO29_SET:
		test_pr_info("%s: CMD23 bits [16-29] set", __func__);
		/* Reserved bits 16-29 of the CMD23 argument must be zero */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
			PACKED_HDR_BITS_16_TO_29_SET;
		break;
	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
		test_pr_info("%s: CMD23 hdr not in block count", __func__);
		/* For writes, report only the data blocks, excluding header */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
		((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
		break;
	}
}
472
/*
 * A callback assigned to the err_check_fn field of the mmc_request by the
 * MMC/card/block layer.
 * Called upon request completion by the MMC/core layer.
 * Here we emulate an error return value from the card, chosen by
 * mbtd->test_info.testcase. Several cases use mbtd->err_check_counter to
 * vary the emulated result across successive completions, and clear
 * mq->err_check_fn once the emulation sequence is done.
 * Returns an MMC_BLK_* status (0 on the early-exit error paths).
 */
static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request_queue *req_q = test_iosched_get_req_queue();
	struct mmc_queue *mq;
	int max_packed_reqs;
	int ret = 0;
	struct mmc_blk_request *brq;

	if (req_q)
		mq = req_q->queuedata;
	else {
		test_pr_err("%s: NULL request_queue", __func__);
		return 0;
	}

	if (!mq) {
		test_pr_err("%s: %s: NULL mq", __func__,
			mmc_hostname(card->host));
		return 0;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	if (!mq_rq) {
		test_pr_err("%s: %s: NULL mq_rq", __func__,
			mmc_hostname(card->host));
		return 0;
	}
	brq = &mq_rq->brq;

	switch (mbtd->test_info.testcase) {
	case TEST_RET_ABORT:
		test_pr_info("%s: return abort", __func__);
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
		test_pr_info("%s: return partial followed by success",
			      __func__);
		/*
		 * Since in this testcase num_requests is always >= 2,
		 * we can be sure that packed_fail_idx is always >= 1
		 */
		mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
		test_pr_info("%s: packed_fail_idx = %d"
			, __func__, mq_rq->packed_fail_idx);
		/* fail only once: next completion uses the normal path */
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
		if (!mbtd->err_check_counter) {
			/* first completion: emulate a partial failure */
			test_pr_info("%s: return partial followed by abort",
				      __func__);
			mbtd->err_check_counter++;
			/*
			 * Since in this testcase num_requests is always >= 3,
			 * we have that packed_fail_idx is always >= 1
			 */
			mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
			test_pr_info("%s: packed_fail_idx = %d"
				, __func__, mq_rq->packed_fail_idx);
			ret = MMC_BLK_PARTIAL;
			break;
		}
		/* second completion: abort and stop emulating */
		mbtd->err_check_counter = 0;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
		test_pr_info("%s: return partial multiple until success",
			     __func__);
		/* keep failing at index 1 until num_requests completions */
		if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
			mq->err_check_fn = NULL;
			mbtd->err_check_counter = 0;
			ret = MMC_BLK_PARTIAL;
			break;
		}
		mq_rq->packed_fail_idx = 1;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
		test_pr_info("%s: return partial max fail_idx", __func__);
		/* fail at the last possible index of the packed command */
		mq_rq->packed_fail_idx = max_packed_reqs - 1;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_RETRY:
		test_pr_info("%s: return retry", __func__);
		ret = MMC_BLK_RETRY;
		break;
	case TEST_RET_CMD_ERR:
		test_pr_info("%s: return cmd err", __func__);
		ret = MMC_BLK_CMD_ERR;
		break;
	case TEST_RET_DATA_ERR:
		test_pr_info("%s: return data err", __func__);
		ret = MMC_BLK_DATA_ERR;
		break;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* only the first completion raises the exception event bit */
		if (mbtd->err_check_counter++ == 0) {
			test_pr_info("%s: simulate an exception from the card",
				     __func__);
			brq->cmd.resp[0] |= R1_EXCEPTION_EVENT;
		}
		mq->err_check_fn = NULL;
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
	}

	return ret;
}
595
596/*
597 * This is a specific implementation for the get_test_case_str_fn function
598 * pointer in the test_info data structure. Given a valid test_data instance,
599 * the function returns a string resembling the test name, based on the testcase
600 */
601static char *get_test_case_str(struct test_data *td)
602{
603 if (!td) {
604 test_pr_err("%s: NULL td", __func__);
605 return NULL;
606 }
607
Lee Susman039ce092012-11-15 13:36:15 +0200608switch (td->test_info.testcase) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200609 case TEST_STOP_DUE_TO_FLUSH:
Lee Susman039ce092012-11-15 13:36:15 +0200610 return "\"stop due to flush\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200611 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200612 return "\"stop due to flush after max-1 reqs\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200613 case TEST_STOP_DUE_TO_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200614 return "\"stop due to read\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200615 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200616 return "\"stop due to read after max-1 reqs\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200617 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
Lee Susman039ce092012-11-15 13:36:15 +0200618 return "\"stop due to empty queue\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200619 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
Lee Susman039ce092012-11-15 13:36:15 +0200620 return "\"stop due to max req num\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200621 case TEST_STOP_DUE_TO_THRESHOLD:
Lee Susman039ce092012-11-15 13:36:15 +0200622 return "\"stop due to exceeding threshold\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200623 case TEST_RET_ABORT:
Lee Susman039ce092012-11-15 13:36:15 +0200624 return "\"err_check return abort\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200625 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
Lee Susman039ce092012-11-15 13:36:15 +0200626 return "\"err_check return partial followed by success\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200627 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
Lee Susman039ce092012-11-15 13:36:15 +0200628 return "\"err_check return partial followed by abort\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200629 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
Lee Susman039ce092012-11-15 13:36:15 +0200630 return "\"err_check return partial multiple until success\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200631 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
Lee Susman039ce092012-11-15 13:36:15 +0200632 return "\"err_check return partial max fail index\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200633 case TEST_RET_RETRY:
Lee Susman039ce092012-11-15 13:36:15 +0200634 return "\"err_check return retry\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200635 case TEST_RET_CMD_ERR:
Lee Susman039ce092012-11-15 13:36:15 +0200636 return "\"err_check return cmd error\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200637 case TEST_RET_DATA_ERR:
Lee Susman039ce092012-11-15 13:36:15 +0200638 return "\"err_check return data error\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200639 case TEST_HDR_INVALID_VERSION:
Lee Susman039ce092012-11-15 13:36:15 +0200640 return "\"invalid - wrong header version\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200641 case TEST_HDR_WRONG_WRITE_CODE:
Lee Susman039ce092012-11-15 13:36:15 +0200642 return "\"invalid - wrong write code\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200643 case TEST_HDR_INVALID_RW_CODE:
Lee Susman039ce092012-11-15 13:36:15 +0200644 return "\"invalid - wrong R/W code\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200645 case TEST_HDR_DIFFERENT_ADDRESSES:
Lee Susman039ce092012-11-15 13:36:15 +0200646 return "\"invalid - header different addresses\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200647 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
Lee Susman039ce092012-11-15 13:36:15 +0200648 return "\"invalid - header req num smaller than actual\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200649 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
Lee Susman039ce092012-11-15 13:36:15 +0200650 return "\"invalid - header req num larger than actual\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200651 case TEST_HDR_CMD23_PACKED_BIT_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200652 return "\"invalid - header cmd23 packed bit set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200653 case TEST_CMD23_MAX_PACKED_WRITES:
Lee Susman039ce092012-11-15 13:36:15 +0200654 return "\"invalid - cmd23 max packed writes\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200655 case TEST_CMD23_ZERO_PACKED_WRITES:
Lee Susman039ce092012-11-15 13:36:15 +0200656 return "\"invalid - cmd23 zero packed writes\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200657 case TEST_CMD23_PACKED_BIT_UNSET:
Lee Susman039ce092012-11-15 13:36:15 +0200658 return "\"invalid - cmd23 packed bit unset\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200659 case TEST_CMD23_REL_WR_BIT_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200660 return "\"invalid - cmd23 rel wr bit set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200661 case TEST_CMD23_BITS_16TO29_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200662 return "\"invalid - cmd23 bits [16-29] set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200663 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
Lee Susman039ce092012-11-15 13:36:15 +0200664 return "\"invalid - cmd23 header block not in count\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200665 case TEST_PACKING_EXP_N_OVER_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200666 return "\"packing control - pack n\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200667 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200668 return "\"packing control - pack n followed by read\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200669 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
Lee Susman039ce092012-11-15 13:36:15 +0200670 return "\"packing control - pack n followed by flush\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200671 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200672 return "\"packing control - pack one followed by read\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200673 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200674 return "\"packing control - pack threshold\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200675 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
Lee Susman039ce092012-11-15 13:36:15 +0200676 return "\"packing control - no packing\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200677 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
Lee Susman039ce092012-11-15 13:36:15 +0200678 return "\"packing control - no packing, trigger requests\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200679 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200680 return "\"packing control - no pack, trigger-read-trigger\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200681 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200682 return "\"packing control- no pack, trigger-flush-trigger\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200683 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
Lee Susman039ce092012-11-15 13:36:15 +0200684 return "\"packing control - mix: pack -> no pack -> pack\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200685 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
Lee Susman039ce092012-11-15 13:36:15 +0200686 return "\"packing control - mix: no pack->pack->no pack\"";
Maya Erezddc55732012-10-17 09:51:01 +0200687 case TEST_WRITE_DISCARD_SANITIZE_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200688 return "\"write, discard, sanitize\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200689 case BKOPS_DELAYED_WORK_LEVEL_1:
Lee Susman039ce092012-11-15 13:36:15 +0200690 return "\"delayed work BKOPS level 1\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200691 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
Lee Susman039ce092012-11-15 13:36:15 +0200692 return "\"delayed work BKOPS level 1 with HPI\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200693 case BKOPS_CANCEL_DELAYED_WORK:
Lee Susman039ce092012-11-15 13:36:15 +0200694 return "\"cancel delayed BKOPS work\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200695 case BKOPS_URGENT_LEVEL_2:
Lee Susman039ce092012-11-15 13:36:15 +0200696 return "\"urgent BKOPS level 2\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200697 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200698 return "\"urgent BKOPS level 2, followed by a request\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200699 case BKOPS_URGENT_LEVEL_3:
Lee Susman039ce092012-11-15 13:36:15 +0200700 return "\"urgent BKOPS level 3\"";
Lee Susmanf18263a2012-10-24 14:14:37 +0200701 case TEST_LONG_SEQUENTIAL_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200702 return "\"long sequential read\"";
Lee Susmana35ae6e2012-10-25 16:06:07 +0200703 case TEST_LONG_SEQUENTIAL_WRITE:
Lee Susman039ce092012-11-15 13:36:15 +0200704 return "\"long sequential write\"";
Lee Susmanb09c0412012-12-19 14:28:52 +0200705 case TEST_NEW_REQ_NOTIFICATION:
706 return "\"new request notification test\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200707 default:
Lee Susman039ce092012-11-15 13:36:15 +0200708 return " Unknown testcase";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200709 }
710
711 return NULL;
712}
713
714/*
715 * Compare individual testcase's statistics to the expected statistics:
716 * Compare stop reason and number of packing events
717 */
718static int check_wr_packing_statistics(struct test_data *td)
719{
720 struct mmc_wr_pack_stats *mmc_packed_stats;
721 struct mmc_queue *mq = td->req_q->queuedata;
722 int max_packed_reqs = mq->card->ext_csd.max_packed_writes;
723 int i;
724 struct mmc_card *card = mq->card;
725 struct mmc_wr_pack_stats expected_stats;
726 int *stop_reason;
727 int ret = 0;
728
729 if (!mq) {
730 test_pr_err("%s: NULL mq", __func__);
731 return -EINVAL;
732 }
733
734 expected_stats = mbtd->exp_packed_stats;
735
736 mmc_packed_stats = mmc_blk_get_packed_statistics(card);
737 if (!mmc_packed_stats) {
738 test_pr_err("%s: NULL mmc_packed_stats", __func__);
739 return -EINVAL;
740 }
741
742 if (!mmc_packed_stats->packing_events) {
743 test_pr_err("%s: NULL packing_events", __func__);
744 return -EINVAL;
745 }
746
747 spin_lock(&mmc_packed_stats->lock);
748
749 if (!mmc_packed_stats->enabled) {
750 test_pr_err("%s write packing statistics are not enabled",
751 __func__);
752 ret = -EINVAL;
753 goto exit_err;
754 }
755
756 stop_reason = mmc_packed_stats->pack_stop_reason;
757
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200758 for (i = 1; i <= max_packed_reqs; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200759 if (mmc_packed_stats->packing_events[i] !=
760 expected_stats.packing_events[i]) {
761 test_pr_err(
762 "%s: Wrong pack stats in index %d, got %d, expected %d",
763 __func__, i, mmc_packed_stats->packing_events[i],
764 expected_stats.packing_events[i]);
765 if (td->fs_wr_reqs_during_test)
766 goto cancel_round;
767 ret = -EINVAL;
768 goto exit_err;
769 }
770 }
771
772 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
773 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
774 test_pr_err(
775 "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
776 __func__, stop_reason[EXCEEDS_SEGMENTS],
777 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
778 if (td->fs_wr_reqs_during_test)
779 goto cancel_round;
780 ret = -EINVAL;
781 goto exit_err;
782 }
783
784 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
785 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
786 test_pr_err(
787 "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
788 __func__, stop_reason[EXCEEDS_SECTORS],
789 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
790 if (td->fs_wr_reqs_during_test)
791 goto cancel_round;
792 ret = -EINVAL;
793 goto exit_err;
794 }
795
796 if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
797 expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
798 test_pr_err(
799 "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
800 __func__, stop_reason[WRONG_DATA_DIR],
801 expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
802 if (td->fs_wr_reqs_during_test)
803 goto cancel_round;
804 ret = -EINVAL;
805 goto exit_err;
806 }
807
808 if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
809 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
810 test_pr_err(
811 "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
812 __func__, stop_reason[FLUSH_OR_DISCARD],
813 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
814 if (td->fs_wr_reqs_during_test)
815 goto cancel_round;
816 ret = -EINVAL;
817 goto exit_err;
818 }
819
820 if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
821 expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
822 test_pr_err(
823 "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
824 __func__, stop_reason[EMPTY_QUEUE],
825 expected_stats.pack_stop_reason[EMPTY_QUEUE]);
826 if (td->fs_wr_reqs_during_test)
827 goto cancel_round;
828 ret = -EINVAL;
829 goto exit_err;
830 }
831
832 if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
833 expected_stats.pack_stop_reason[REL_WRITE]) {
834 test_pr_err(
835 "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
836 __func__, stop_reason[REL_WRITE],
837 expected_stats.pack_stop_reason[REL_WRITE]);
838 if (td->fs_wr_reqs_during_test)
839 goto cancel_round;
840 ret = -EINVAL;
841 goto exit_err;
842 }
843
844exit_err:
845 spin_unlock(&mmc_packed_stats->lock);
846 if (ret && mmc_packed_stats->enabled)
847 print_mmc_packing_stats(card);
848 return ret;
849cancel_round:
850 spin_unlock(&mmc_packed_stats->lock);
851 test_iosched_set_ignore_round(true);
852 return 0;
853}
854
855/*
856 * Pseudo-randomly choose a seed based on the last seed, and update it in
857 * seed_number. then return seed_number (mod max_val), or min_val.
858 */
859static unsigned int pseudo_random_seed(unsigned int *seed_number,
860 unsigned int min_val,
861 unsigned int max_val)
862{
863 int ret = 0;
864
865 if (!seed_number)
866 return 0;
867
868 *seed_number = ((unsigned int)(((unsigned long)*seed_number *
869 (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
870 ret = (unsigned int)((*seed_number) % max_val);
871
872 return (ret > min_val ? ret : min_val);
873}
874
875/*
876 * Given a pseudo-random seed, find a pseudo-random num_of_bios.
877 * Make sure that num_of_bios is not larger than TEST_MAX_SECTOR_RANGE
878 */
879static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
880 unsigned int *num_of_bios)
881{
882 do {
883 *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
884 TEST_MAX_BIOS_PER_REQ);
885 if (!(*num_of_bios))
886 *num_of_bios = 1;
887 } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
888}
889
890/* Add a single read request to the given td's request queue */
891static int prepare_request_add_read(struct test_data *td)
892{
893 int ret;
894 int start_sec;
895
896 if (td)
897 start_sec = td->start_sector;
898 else {
899 test_pr_err("%s: NULL td", __func__);
900 return 0;
901 }
902
903 test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
904 td->wr_rd_next_req_id);
905
906 ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
907 TEST_PATTERN_5A, NULL);
908 if (ret) {
909 test_pr_err("%s: failed to add a read request", __func__);
910 return ret;
911 }
912
913 return 0;
914}
915
916/* Add a single flush request to the given td's request queue */
917static int prepare_request_add_flush(struct test_data *td)
918{
919 int ret;
920
921 if (!td) {
922 test_pr_err("%s: NULL td", __func__);
923 return 0;
924 }
925
926 test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
927 td->unique_next_req_id);
928 ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
929 0, 0, NULL);
930 if (ret) {
931 test_pr_err("%s: failed to add a flush request", __func__);
932 return ret;
933 }
934
935 return ret;
936}
937
938/*
939 * Add num_requets amount of write requests to the given td's request queue.
940 * If random test mode is chosen we pseudo-randomly choose the number of bios
941 * for each write request, otherwise add between 1 to 5 bio per request.
942 */
943static int prepare_request_add_write_reqs(struct test_data *td,
944 int num_requests, int is_err_expected,
945 int is_random)
946{
947 int i;
948 unsigned int start_sec;
949 int num_bios;
950 int ret = 0;
951 unsigned int *bio_seed = &mbtd->random_test_seed;
952
953 if (td)
954 start_sec = td->start_sector;
955 else {
956 test_pr_err("%s: NULL td", __func__);
957 return ret;
958 }
959
960 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
961 num_requests, td->wr_rd_next_req_id);
962
Lee Susmanf18263a2012-10-24 14:14:37 +0200963 for (i = 1 ; i <= num_requests ; i++) {
964 start_sec =
965 td->start_sector + sizeof(int) *
966 BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200967 if (is_random)
968 pseudo_rnd_num_of_bios(bio_seed, &num_bios);
969 else
970 /*
971 * For the non-random case, give num_bios a value
972 * between 1 and 5, to keep a small number of BIOs
973 */
974 num_bios = (i%5)+1;
975
976 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
977 start_sec, num_bios, TEST_PATTERN_5A, NULL);
978
979 if (ret) {
980 test_pr_err("%s: failed to add a write request",
981 __func__);
982 return ret;
983 }
984 }
985 return 0;
986}
987
988/*
989 * Prepare the write, read and flush requests for a generic packed commands
990 * testcase
991 */
992static int prepare_packed_requests(struct test_data *td, int is_err_expected,
993 int num_requests, int is_random)
994{
995 int ret = 0;
996 struct mmc_queue *mq;
997 int max_packed_reqs;
998 struct request_queue *req_q;
999
1000 if (!td) {
1001 pr_err("%s: NULL td", __func__);
1002 return -EINVAL;
1003 }
1004
1005 req_q = td->req_q;
1006
1007 if (!req_q) {
1008 pr_err("%s: NULL request queue", __func__);
1009 return -EINVAL;
1010 }
1011
1012 mq = req_q->queuedata;
1013 if (!mq) {
1014 test_pr_err("%s: NULL mq", __func__);
1015 return -EINVAL;
1016 }
1017
1018 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1019
1020 if (mbtd->random_test_seed <= 0) {
1021 mbtd->random_test_seed =
1022 (unsigned int)(get_jiffies_64() & 0xFFFF);
1023 test_pr_info("%s: got seed from jiffies %d",
1024 __func__, mbtd->random_test_seed);
1025 }
1026
1027 mmc_blk_init_packed_statistics(mq->card);
1028
1029 ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
1030 is_random);
1031 if (ret)
1032 return ret;
1033
1034 /* Avoid memory corruption in upcoming stats set */
1035 if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
1036 num_requests--;
1037
1038 memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
1039 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1040 memset(mbtd->exp_packed_stats.packing_events, 0,
1041 (max_packed_reqs + 1) * sizeof(u32));
1042 if (num_requests <= max_packed_reqs)
1043 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1044
1045 switch (td->test_info.testcase) {
1046 case TEST_STOP_DUE_TO_FLUSH:
1047 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1048 ret = prepare_request_add_flush(td);
1049 if (ret)
1050 return ret;
1051
1052 mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
1053 break;
1054 case TEST_STOP_DUE_TO_READ:
1055 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1056 ret = prepare_request_add_read(td);
1057 if (ret)
1058 return ret;
1059
1060 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1061 break;
1062 case TEST_STOP_DUE_TO_THRESHOLD:
1063 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1064 mbtd->exp_packed_stats.packing_events[1] = 1;
1065 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
1066 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1067 break;
1068 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1069 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1070 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
1071 break;
1072 default:
1073 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1074 }
1075 mbtd->num_requests = num_requests;
1076
1077 return 0;
1078}
1079
1080/*
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001081 * Prepare the write, read and flush requests for the packing control
1082 * testcases
1083 */
static int prepare_packed_control_tests_requests(struct test_data *td,
			int is_err_expected, int num_requests, int is_random)
{
	int ret = 0;
	struct mmc_queue *mq;
	int max_packed_reqs;
	/* saved copy of num_requests; some testcases override it below */
	int temp_num_req = num_requests;
	struct request_queue *req_q;
	/* number of write reqs that triggers packing (from the queue) */
	int test_packed_trigger;
	/* requests expected to actually be packed (above the trigger) */
	int num_packed_reqs;

	if (!td) {
		test_pr_err("%s: NULL td\n", __func__);
		return -EINVAL;
	}

	req_q = td->req_q;

	if (!req_q) {
		test_pr_err("%s: NULL request queue\n", __func__);
		return -EINVAL;
	}

	mq = req_q->queuedata;
	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
	num_packed_reqs = num_requests - test_packed_trigger;

	/* Seed the PRNG from jiffies if the user did not provide one */
	if (mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			     __func__, mbtd->random_test_seed);
	}

	mmc_blk_init_packed_statistics(mq->card);

	/*
	 * This testcase first issues fewer writes than the packing trigger
	 * (so no packing occurs); the full count is restored after the
	 * initial batch is added.
	 */
	if (td->test_info.testcase ==
			TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
		temp_num_req = num_requests;
		num_requests = test_packed_trigger - 1;
	}

	/* Verify that the packing is disabled before starting the test */
	mq->wr_packing_enabled = false;
	mq->num_of_potential_packed_wr_reqs = 0;

	/*
	 * This testcase starts with packing already enabled, as if the
	 * trigger had been crossed before the test began.
	 */
	if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
		mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
		mq->wr_packing_enabled = true;
		num_requests = test_packed_trigger + 2;
	}

	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
					     is_random);
	if (ret)
		goto exit;

	/* restore the full request count saved above */
	if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
		num_requests = temp_num_req;

	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
	memset(mbtd->exp_packed_stats.packing_events, 0,
		(max_packed_reqs + 1) * sizeof(u32));

	/*
	 * Per-testcase: add the follow-up read/flush/write requests and set
	 * the packing statistics we expect the driver to report.
	 */
	switch (td->test_info.testcase) {
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
		/* a read after the writes stops the pack (wrong direction) */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
		/* flush splits the writes into two packed commands */
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_packed_reqs,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
		/* a read resets the potential-packed counter */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
		/* a flush resets the potential-packed counter */
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
		/* pack -> read breaks it -> below-trigger -> pack again */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
		mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		break;
	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
		/* no pack -> reads re-arm -> pack -> below-trigger tail */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	default:
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
	}
	mbtd->num_requests = num_requests;

exit:
	return ret;
}
1258
1259/*
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001260 * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
1261 * In this testcase we have mixed error expectations from different
1262 * write requests, hence the special prepare function.
1263 */
1264static int prepare_partial_followed_by_abort(struct test_data *td,
1265 int num_requests)
1266{
1267 int i, start_address;
1268 int is_err_expected = 0;
1269 int ret = 0;
1270 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1271 int max_packed_reqs;
1272
1273 if (!mq) {
1274 test_pr_err("%s: NULL mq", __func__);
1275 return -EINVAL;
1276 }
1277
1278 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1279
1280 mmc_blk_init_packed_statistics(mq->card);
1281
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001282 for (i = 1; i <= num_requests; i++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001283 if (i > (num_requests / 2))
1284 is_err_expected = 1;
1285
Lee Susmanf18263a2012-10-24 14:14:37 +02001286 start_address = td->start_sector +
1287 sizeof(int) * BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001288 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001289 start_address, (i % 5) + 1, TEST_PATTERN_5A,
1290 NULL);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001291 if (ret) {
1292 test_pr_err("%s: failed to add a write request",
1293 __func__);
1294 return ret;
1295 }
1296 }
1297
1298 memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
1299 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1300 memset(mbtd->exp_packed_stats.packing_events, 0,
1301 (max_packed_reqs + 1) * sizeof(u32));
1302 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1303 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1304
1305 mbtd->num_requests = num_requests;
1306
1307 return ret;
1308}
1309
1310/*
1311 * Get number of write requests for current testcase. If random test mode was
1312 * chosen, pseudo-randomly choose the number of requests, otherwise set to
1313 * two less than the packing threshold.
1314 */
1315static int get_num_requests(struct test_data *td)
1316{
1317 int *seed = &mbtd->random_test_seed;
1318 struct request_queue *req_q;
1319 struct mmc_queue *mq;
1320 int max_num_requests;
1321 int num_requests;
1322 int min_num_requests = 2;
1323 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001324 int max_for_double;
1325 int test_packed_trigger;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001326
1327 req_q = test_iosched_get_req_queue();
1328 if (req_q)
1329 mq = req_q->queuedata;
1330 else {
1331 test_pr_err("%s: NULL request queue", __func__);
1332 return 0;
1333 }
1334
1335 if (!mq) {
1336 test_pr_err("%s: NULL mq", __func__);
1337 return -EINVAL;
1338 }
1339
1340 max_num_requests = mq->card->ext_csd.max_packed_writes;
1341 num_requests = max_num_requests - 2;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001342 test_packed_trigger = mq->num_wr_reqs_to_start_packing;
1343
1344 /*
1345 * Here max_for_double is intended for packed control testcases
1346 * in which we issue many write requests. It's purpose is to prevent
1347 * exceeding max number of req_queue requests.
1348 */
1349 max_for_double = max_num_requests - 10;
1350
1351 if (td->test_info.testcase ==
1352 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1353 /* Don't expect packing, so issue up to trigger-1 reqs */
1354 num_requests = test_packed_trigger - 1;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001355
1356 if (is_random) {
1357 if (td->test_info.testcase ==
1358 TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001359 /*
1360 * Here we don't want num_requests to be less than 1
1361 * as a consequence of division by 2.
1362 */
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001363 min_num_requests = 3;
1364
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001365 if (td->test_info.testcase ==
1366 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1367 /* Don't expect packing, so issue up to trigger reqs */
1368 max_num_requests = test_packed_trigger;
1369
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001370 num_requests = pseudo_random_seed(seed, min_num_requests,
1371 max_num_requests - 1);
1372 }
1373
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001374 if (td->test_info.testcase ==
1375 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1376 num_requests -= test_packed_trigger;
1377
1378 if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
1379 num_requests =
1380 num_requests > max_for_double ? max_for_double : num_requests;
1381
1382 if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
1383 num_requests += test_packed_trigger;
1384
1385 if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
1386 num_requests = test_packed_trigger;
1387
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001388 return num_requests;
1389}
1390
Lee Susmanf18263a2012-10-24 14:14:37 +02001391static int prepare_long_test_requests(struct test_data *td)
1392{
1393
1394 int ret;
1395 int start_sec;
1396 int j;
1397 int test_direction;
1398
1399 if (td)
1400 start_sec = td->start_sector;
1401 else {
1402 test_pr_err("%s: NULL td\n", __func__);
1403 return -EINVAL;
1404 }
1405
Lee Susmana35ae6e2012-10-25 16:06:07 +02001406 if (td->test_info.testcase == TEST_LONG_SEQUENTIAL_WRITE)
1407 test_direction = WRITE;
1408 else
1409 test_direction = READ;
Lee Susmanf18263a2012-10-24 14:14:37 +02001410
Lee Susmana35ae6e2012-10-25 16:06:07 +02001411 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
Lee Susmanf18263a2012-10-24 14:14:37 +02001412 LONG_TEST_ACTUAL_NUM_REQS, td->wr_rd_next_req_id);
1413
1414 for (j = 0; j < LONG_TEST_ACTUAL_NUM_REQS; j++) {
1415
1416 ret = test_iosched_add_wr_rd_test_req(0, test_direction,
1417 start_sec,
1418 TEST_MAX_BIOS_PER_REQ,
1419 TEST_NO_PATTERN, NULL);
1420 if (ret) {
1421 test_pr_err("%s: failed to add a bio request",
1422 __func__);
1423 return ret;
1424 }
1425
1426 start_sec +=
1427 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE);
1428 }
1429
1430 return 0;
1431}
1432
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001433/*
1434 * An implementation for the prepare_test_fn pointer in the test_info
1435 * data structure. According to the testcase we add the right number of requests
1436 * and decide if an error is expected or not.
1437 */
1438static int prepare_test(struct test_data *td)
1439{
1440 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1441 int max_num_requests;
1442 int num_requests = 0;
1443 int ret = 0;
1444 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001445 int test_packed_trigger = mq->num_wr_reqs_to_start_packing;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001446
1447 if (!mq) {
1448 test_pr_err("%s: NULL mq", __func__);
1449 return -EINVAL;
1450 }
1451
1452 max_num_requests = mq->card->ext_csd.max_packed_writes;
1453
1454 if (is_random && mbtd->random_test_seed == 0) {
1455 mbtd->random_test_seed =
1456 (unsigned int)(get_jiffies_64() & 0xFFFF);
1457 test_pr_info("%s: got seed from jiffies %d",
1458 __func__, mbtd->random_test_seed);
1459 }
1460
1461 num_requests = get_num_requests(td);
1462
1463 if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
1464 mq->packed_test_fn =
1465 test_invalid_packed_cmd;
1466
1467 if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
1468 mq->err_check_fn = test_err_check;
1469
1470 switch (td->test_info.testcase) {
1471 case TEST_STOP_DUE_TO_FLUSH:
1472 case TEST_STOP_DUE_TO_READ:
1473 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
1474 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
1475 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
1476 case TEST_CMD23_PACKED_BIT_UNSET:
1477 ret = prepare_packed_requests(td, 0, num_requests, is_random);
1478 break;
1479 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1480 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1481 ret = prepare_packed_requests(td, 0, max_num_requests - 1,
1482 is_random);
1483 break;
1484 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
1485 ret = prepare_partial_followed_by_abort(td, num_requests);
1486 break;
1487 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1488 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1489 ret = prepare_packed_requests(td, 0, max_num_requests,
1490 is_random);
1491 break;
1492 case TEST_STOP_DUE_TO_THRESHOLD:
1493 ret = prepare_packed_requests(td, 0, max_num_requests + 1,
1494 is_random);
1495 break;
1496 case TEST_RET_ABORT:
1497 case TEST_RET_RETRY:
1498 case TEST_RET_CMD_ERR:
1499 case TEST_RET_DATA_ERR:
1500 case TEST_HDR_INVALID_VERSION:
1501 case TEST_HDR_WRONG_WRITE_CODE:
1502 case TEST_HDR_INVALID_RW_CODE:
1503 case TEST_HDR_DIFFERENT_ADDRESSES:
1504 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
1505 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
1506 case TEST_CMD23_MAX_PACKED_WRITES:
1507 case TEST_CMD23_ZERO_PACKED_WRITES:
1508 case TEST_CMD23_REL_WR_BIT_SET:
1509 case TEST_CMD23_BITS_16TO29_SET:
1510 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
1511 case TEST_HDR_CMD23_PACKED_BIT_SET:
1512 ret = prepare_packed_requests(td, 1, num_requests, is_random);
1513 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001514 case TEST_PACKING_EXP_N_OVER_TRIGGER:
1515 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
1516 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
1517 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
1518 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
1519 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
1520 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1521 is_random);
1522 break;
1523 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
1524 ret = prepare_packed_control_tests_requests(td, 0,
1525 max_num_requests, is_random);
1526 break;
1527 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
1528 ret = prepare_packed_control_tests_requests(td, 0,
1529 test_packed_trigger + 1,
1530 is_random);
1531 break;
1532 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
1533 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1534 is_random);
1535 break;
1536 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
1537 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
1538 ret = prepare_packed_control_tests_requests(td, 0,
1539 test_packed_trigger, is_random);
1540 break;
Lee Susmana35ae6e2012-10-25 16:06:07 +02001541 case TEST_LONG_SEQUENTIAL_WRITE:
1542 ret = prepare_long_test_requests(td);
1543 break;
Lee Susmanf18263a2012-10-24 14:14:37 +02001544 case TEST_LONG_SEQUENTIAL_READ:
1545 ret = prepare_long_test_requests(td);
1546 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001547 default:
1548 test_pr_info("%s: Invalid test case...", __func__);
Lee Susmanf18263a2012-10-24 14:14:37 +02001549 ret = -EINVAL;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001550 }
1551
1552 return ret;
1553}
1554
1555/*
1556 * An implementation for the post_test_fn in the test_info data structure.
1557 * In our case we just reset the function pointers in the mmc_queue in order for
1558 * the FS to be able to dispatch it's requests correctly after the test is
1559 * finished.
1560 */
1561static int post_test(struct test_data *td)
1562{
1563 struct mmc_queue *mq;
1564
1565 if (!td)
1566 return -EINVAL;
1567
1568 mq = td->req_q->queuedata;
1569
1570 if (!mq) {
1571 test_pr_err("%s: NULL mq", __func__);
1572 return -EINVAL;
1573 }
1574
1575 mq->packed_test_fn = NULL;
1576 mq->err_check_fn = NULL;
1577
1578 return 0;
1579}
1580
1581/*
1582 * This function checks, based on the current test's test_group, that the
1583 * packed commands capability and control are set right. In addition, we check
1584 * if the card supports the packed command feature.
1585 */
1586static int validate_packed_commands_settings(void)
1587{
1588 struct request_queue *req_q;
1589 struct mmc_queue *mq;
1590 int max_num_requests;
1591 struct mmc_host *host;
1592
1593 req_q = test_iosched_get_req_queue();
1594 if (!req_q) {
1595 test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
1596 test_iosched_set_test_result(TEST_FAILED);
1597 return -EINVAL;
1598 }
1599
1600 mq = req_q->queuedata;
1601 if (!mq) {
1602 test_pr_err("%s: NULL mq", __func__);
1603 return -EINVAL;
1604 }
1605
1606 max_num_requests = mq->card->ext_csd.max_packed_writes;
1607 host = mq->card->host;
1608
1609 if (!(host->caps2 && MMC_CAP2_PACKED_WR)) {
1610 test_pr_err("%s: Packed Write capability disabled, exit test",
1611 __func__);
1612 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1613 return -EINVAL;
1614 }
1615
1616 if (max_num_requests == 0) {
1617 test_pr_err(
1618 "%s: no write packing support, ext_csd.max_packed_writes=%d",
1619 __func__, mq->card->ext_csd.max_packed_writes);
1620 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1621 return -EINVAL;
1622 }
1623
1624 test_pr_info("%s: max number of packed requests supported is %d ",
1625 __func__, max_num_requests);
1626
1627 switch (mbtd->test_group) {
1628 case TEST_SEND_WRITE_PACKING_GROUP:
1629 case TEST_ERR_CHECK_GROUP:
1630 case TEST_SEND_INVALID_GROUP:
1631 /* disable the packing control */
1632 host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
1633 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001634 case TEST_PACKING_CONTROL_GROUP:
1635 host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
1636 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001637 default:
1638 break;
1639 }
1640
1641 return 0;
1642}
1643
Maya Erezddc55732012-10-17 09:51:01 +02001644static void pseudo_rnd_sector_and_size(unsigned int *seed,
1645 unsigned int min_start_sector,
1646 unsigned int *start_sector,
1647 unsigned int *num_of_bios)
1648{
1649 unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
1650 do {
1651 *start_sector = pseudo_random_seed(seed,
1652 1, max_sec);
1653 *num_of_bios = pseudo_random_seed(seed,
1654 1, TEST_MAX_BIOS_PER_REQ);
1655 if (!(*num_of_bios))
1656 *num_of_bios = 1;
1657 } while ((*start_sector < min_start_sector) ||
1658 (*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
1659}
1660
1661/* sanitize test functions */
1662static int prepare_write_discard_sanitize_read(struct test_data *td)
1663{
1664 unsigned int start_sector;
1665 unsigned int num_of_bios = 0;
1666 static unsigned int total_bios;
1667 unsigned int *num_bios_seed;
1668 int i = 0;
1669
1670 if (mbtd->random_test_seed == 0) {
1671 mbtd->random_test_seed =
1672 (unsigned int)(get_jiffies_64() & 0xFFFF);
1673 test_pr_info("%s: got seed from jiffies %d",
1674 __func__, mbtd->random_test_seed);
1675 }
1676 num_bios_seed = &mbtd->random_test_seed;
1677
1678 do {
1679 pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
1680 &start_sector, &num_of_bios);
1681
1682 /* DISCARD */
1683 total_bios += num_of_bios;
1684 test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
1685 __func__, td->unique_next_req_id, start_sector,
1686 num_of_bios);
1687 test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
1688 start_sector, BIO_TO_SECTOR(num_of_bios),
1689 NULL);
1690
1691 } while (++i < (BLKDEV_MAX_RQ-10));
1692
1693 test_pr_info("%s: total discard bios = %d", __func__, total_bios);
1694
1695 test_pr_info("%s: add sanitize req", __func__);
1696 test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);
1697
1698 return 0;
1699}
1700
Yaniv Gardie9214c82012-10-18 13:58:18 +02001701/*
1702 * Post test operations for BKOPs test
1703 * Disable the BKOPs statistics and clear the feature flags
1704 */
1705static int bkops_post_test(struct test_data *td)
1706{
1707 struct request_queue *q = td->req_q;
1708 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1709 struct mmc_card *card = mq->card;
1710
1711 mmc_card_clr_doing_bkops(mq->card);
1712 card->ext_csd.raw_bkops_status = 0;
1713
1714 spin_lock(&card->bkops_info.bkops_stats.lock);
1715 card->bkops_info.bkops_stats.enabled = false;
1716 spin_unlock(&card->bkops_info.bkops_stats.lock);
1717
1718 return 0;
1719}
1720
/*
 * check_bkops_result() - Verify the BKOPs statistics gathered during
 * the test against the expectations of the current test case.
 *
 * Returns 0 when the statistics match (or when the round is ignored
 * due to timing / interfering FS writes), -EINVAL otherwise.
 */
static int check_bkops_result(struct test_data *td)
{
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		goto fail;

	bkops_stat = &card->bkops_info.bkops_stats;

	test_pr_info("%s: Test results: bkops:(%d,%d,%d) hpi:%d, suspend:%d",
			__func__,
			bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX],
			bkops_stat->hpi,
			bkops_stat->suspend);

	/* each test case expects a specific (level, hpi, suspend) pattern */
	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* delayed work ran once and the host reached suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 1) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
		break;
	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* BKOPs started and was interrupted by HPI, no suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 1))
			goto exit;
		/* this might happen due to timing issues */
		else if
		((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto ignore;
		else
			goto fail;
		break;
	case BKOPS_CANCEL_DELAYED_WORK:
		/* the incoming request must have cancelled BKOPs entirely */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 0) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* urgent level 2 BKOPs ran once, without HPI or suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_3:
		/* urgent level 3 BKOPs ran once, without HPI or suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	default:
		return -EINVAL;
	}

exit:
	return 0;
ignore:
	/* result inconclusive (timing): drop this round, don't fail it */
	test_iosched_set_ignore_round(true);
	return 0;
fail:
	/* interfering FS writes invalidate the round rather than fail it */
	if (td->fs_wr_reqs_during_test) {
		test_pr_info("%s: wr reqs during test, cancel the round",
				__func__);
		test_iosched_set_ignore_round(true);
		return 0;
	}

	test_pr_info("%s: BKOPs statistics are not as expected, test failed",
			__func__);
	return -EINVAL;
}
1812
1813static void bkops_end_io_final_fn(struct request *rq, int err)
1814{
1815 struct test_request *test_rq =
1816 (struct test_request *)rq->elv.priv[0];
1817 BUG_ON(!test_rq);
1818
1819 test_rq->req_completed = 1;
1820 test_rq->req_result = err;
1821
1822 test_pr_info("%s: request %d completed, err=%d",
1823 __func__, test_rq->req_id, err);
1824
1825 mbtd->bkops_stage = BKOPS_STAGE_4;
1826 wake_up(&mbtd->bkops_wait_q);
1827}
1828
1829static void bkops_end_io_fn(struct request *rq, int err)
1830{
1831 struct test_request *test_rq =
1832 (struct test_request *)rq->elv.priv[0];
1833 BUG_ON(!test_rq);
1834
1835 test_rq->req_completed = 1;
1836 test_rq->req_result = err;
1837
1838 test_pr_info("%s: request %d completed, err=%d",
1839 __func__, test_rq->req_id, err);
1840 mbtd->bkops_stage = BKOPS_STAGE_2;
1841 wake_up(&mbtd->bkops_wait_q);
1842
1843}
1844
1845static int prepare_bkops(struct test_data *td)
1846{
1847 int ret = 0;
1848 struct request_queue *q = td->req_q;
1849 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1850 struct mmc_card *card = mq->card;
1851 struct mmc_bkops_stats *bkops_stat;
1852
1853 if (!card)
1854 return -EINVAL;
1855
1856 bkops_stat = &card->bkops_info.bkops_stats;
1857
1858 if (!card->ext_csd.bkops_en) {
1859 test_pr_err("%s: BKOPS is not enabled by card or host)",
1860 __func__);
1861 return -ENOTSUPP;
1862 }
1863 if (mmc_card_doing_bkops(card)) {
1864 test_pr_err("%s: BKOPS in progress, try later", __func__);
1865 return -EAGAIN;
1866 }
1867
1868 mmc_blk_init_bkops_statistics(card);
1869
1870 if ((mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2) ||
1871 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2_TWO_REQS) ||
1872 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_3))
1873 mq->err_check_fn = test_err_check;
1874 mbtd->err_check_counter = 0;
1875
1876 return ret;
1877}
1878
/*
 * run_bkops() - Run the BKOPs test case selected in
 * mbtd->test_info.testcase by faking the card's raw BKOPs status and
 * driving the request queue, then waiting for the test state machine
 * (mbtd->bkops_stage, advanced by the end_io callbacks) to reach its
 * final stage.
 *
 * Returns 0 on success, -EINVAL on failure to add a request or on an
 * unknown test case.
 */
static int run_bkops(struct test_data *td)
{
	int ret = 0;
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		return -EINVAL;

	bkops_stat = &card->bkops_info.bkops_stats;

	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* fake a level-1 status and enough dirty sectors to queue
		   the delayed BKOPs work */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		/* this long sleep makes sure the host starts bkops and
		   also, gets into suspend */
		msleep(10000);

		bkops_stat->ignore_card_bkops_status = false;
		card->ext_csd.raw_bkops_status = 0;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* as above, but issue a write after the delayed-work delay
		   so the write interrupts BKOPs with HPI */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		msleep(card->bkops_info.delay_ms);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_CANCEL_DELAYED_WORK:
		/* issue the write immediately so it cancels the queued
		   delayed BKOPs work before it runs */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
		/* fake an urgent status (2 or 3); the first write triggers
		   urgent BKOPs, the second completes the test */
		bkops_stat->ignore_card_bkops_status = true;
		if (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2)
			card->ext_csd.raw_bkops_status = 2;
		else
			card->ext_csd.raw_bkops_status = 3;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* same as urgent level 2 but with two queued writes and
		   write packing disabled */
		mq->wr_packing_enabled = false;
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 2;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      NULL);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
						      td->start_sector,
						      TEST_REQUEST_NUM_OF_BIOS,
						      TEST_PATTERN_5A,
						      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();

		break;
	default:
		test_pr_err("%s: wrong testcase: %d", __func__,
			    mbtd->test_info.testcase);
		ret = -EINVAL;
	}
	return ret;
}
2080
Lee Susmanb09c0412012-12-19 14:28:52 +02002081/*
2082 * new_req_post_test() - Do post test operations for
2083 * new_req_notification test: disable the statistics and clear
2084 * the feature flags.
2085 * @td The test_data for the new_req test that has
2086 * ended.
2087 */
2088static int new_req_post_test(struct test_data *td)
2089{
2090 struct mmc_queue *mq;
2091
2092 if (!td || !td->req_q)
2093 goto exit;
2094
2095 mq = (struct mmc_queue *)td->req_q->queuedata;
2096
2097 if (!mq || !mq->card)
2098 goto exit;
2099
2100 /* disable async_event test stats */
2101 mq->card->async_event_stats.enabled = false;
2102 mmc_print_async_event_stats(mq->card);
2103 test_pr_info("Completed %d requests",
2104 mbtd->completed_req_count);
2105
2106exit:
2107 return 0;
2108}
2109
2110/*
2111 * check_new_req_result() - Print out the number of completed
2112 * requests. Assigned to the check_test_result_fn pointer,
2113 * therefore the name.
2114 * @td The test_data for the new_req test that has
2115 * ended.
2116 */
2117static int check_new_req_result(struct test_data *td)
2118{
2119 test_pr_info("%s: Test results: Completed %d requests",
2120 __func__, mbtd->completed_req_count);
2121 return 0;
2122}
2123
2124/*
2125 * new_req_free_end_io_fn() - Remove request from queuelist and
2126 * free request's allocated memory. Used as a call-back
2127 * assigned to end_io member in request struct.
2128 * @rq The request to be freed
2129 * @err Unused
2130 */
2131static void new_req_free_end_io_fn(struct request *rq, int err)
2132{
2133 struct test_request *test_rq =
2134 (struct test_request *)rq->elv.priv[0];
2135 struct test_data *ptd = test_get_test_data();
2136
2137 BUG_ON(!test_rq);
2138
2139 spin_lock_irq(&ptd->lock);
2140 list_del_init(&test_rq->queuelist);
2141 ptd->dispatched_count--;
2142 spin_unlock_irq(&ptd->lock);
2143
2144 __blk_put_request(ptd->req_q, test_rq->rq);
2145 kfree(test_rq->bios_buffer);
2146 kfree(test_rq);
2147 mbtd->completed_req_count++;
2148}
2149
2150static int prepare_new_req(struct test_data *td)
2151{
2152 struct request_queue *q = td->req_q;
2153 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
2154
2155 mmc_blk_init_packed_statistics(mq->card);
2156 mmc_blk_init_async_event_statistics(mq->card);
2157
2158 mbtd->completed_req_count = 0;
2159
2160 return 0;
2161}
2162
2163static int test_new_req_notification(struct test_data *ptd)
2164{
2165 int ret = 0;
2166 int i;
2167 unsigned int requests_count = 2;
2168 unsigned int bio_num;
2169 struct test_request *test_rq = NULL;
2170
2171 while (1) {
2172 for (i = 0; i < requests_count; i++) {
2173 bio_num = TEST_MAX_BIOS_PER_REQ;
2174 test_rq = test_iosched_create_test_req(0, READ,
2175 ptd->start_sector,
2176 bio_num, TEST_PATTERN_5A,
2177 new_req_free_end_io_fn);
2178 if (test_rq) {
2179 spin_lock_irq(ptd->req_q->queue_lock);
2180 list_add_tail(&test_rq->queuelist,
2181 &ptd->test_queue);
2182 ptd->test_count++;
2183 spin_unlock_irq(ptd->req_q->queue_lock);
2184 } else {
2185 test_pr_err("%s: failed to create read request",
2186 __func__);
2187 ret = -ENODEV;
2188 break;
2189 }
2190 }
2191
2192 __blk_run_queue(ptd->req_q);
2193 /* wait while a mmc layer will send all requests in test_queue*/
2194 while (!list_empty(&ptd->test_queue))
2195 msleep(NEW_REQ_TEST_SLEEP_TIME);
2196
2197 /* test finish criteria */
2198 if (mbtd->completed_req_count > 1000) {
2199 if (ptd->dispatched_count)
2200 continue;
2201 else
2202 break;
2203 }
2204
2205 for (i = 0; i < requests_count; i++) {
2206 bio_num = NEW_REQ_TEST_NUM_BIOS;
2207 test_rq = test_iosched_create_test_req(0, READ,
2208 ptd->start_sector,
2209 bio_num, TEST_PATTERN_5A,
2210 new_req_free_end_io_fn);
2211 if (test_rq) {
2212 spin_lock_irq(ptd->req_q->queue_lock);
2213 list_add_tail(&test_rq->queuelist,
2214 &ptd->test_queue);
2215 ptd->test_count++;
2216 spin_unlock_irq(ptd->req_q->queue_lock);
2217 } else {
2218 test_pr_err("%s: failed to create read request",
2219 __func__);
2220 ret = -ENODEV;
2221 break;
2222 }
2223 }
2224 __blk_run_queue(ptd->req_q);
2225 }
2226
2227 test_iosched_mark_test_completion();
2228 test_pr_info("%s: EXIT: %d code", __func__, ret);
2229
2230 return ret;
2231}
2232
2233static int run_new_req(struct test_data *td)
2234{
2235 int ret = 0;
2236 struct request_queue *q = td->req_q;
2237 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
2238
2239 mmc_blk_init_async_event_statistics(mq->card);
2240 ret = test_new_req_notification(td);
2241
2242 return ret;
2243}
2244
/* set on every open; lets the *_read handlers emit their help text once */
static bool message_repeat;

/* debugfs open: stash the node's private data and re-arm the help text */
static int test_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	message_repeat = 1;
	return 0;
}
2252
/* send_packing TEST */
/*
 * Debugfs write handler: run the send_write_packing test group the
 * requested number of times (written value; defaults to 1), each cycle
 * covering every test case in both random and non-random mode.
 *
 * NOTE(review): @buf is a __user pointer but is passed straight to
 * sscanf(); this should go through copy_from_user() into a kernel
 * buffer first — confirm against the debugfs usage elsewhere.
 */
static ssize_t send_write_packing_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;
	int j = 0;

	test_pr_info("%s: -- send_write_packing TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;


	mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;

	if (validate_packed_commands_settings())
		return count;

	if (mbtd->random_test_seed > 0)
		test_pr_info("%s: Test seed: %d", __func__,
			      mbtd->random_test_seed);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.post_test_fn = post_test;

	for (i = 0; i < number; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
		      j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {

			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
2317
/*
 * Debugfs read handler: return a help text describing the
 * send_write_packing test once per open (message_repeat gate).
 *
 * NOTE(review): @buffer is a __user pointer but memset()/snprintf()
 * write to it directly; this should use simple_read_from_buffer() or
 * copy_to_user() — confirm before relying on this node.
 */
static ssize_t send_write_packing_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nsend_write_packing_test\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Pack due to FLUSH message\n"
		 "- Pack due to FLUSH after threshold writes\n"
		 "- Pack due to READ message\n"
		 "- Pack due to READ after threshold writes\n"
		 "- Pack due to empty queue\n"
		 "- Pack due to threshold writes\n"
		 "- Pack due to one over threshold writes\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2345
/* debugfs fops for the send_write_packing test node */
const struct file_operations send_write_packing_test_ops = {
	.open = test_open,
	.write = send_write_packing_test_write,
	.read = send_write_packing_test_read,
};
2351
/* err_check TEST */
/*
 * Debugfs write handler: run the err_check test group the requested
 * number of times (written value; defaults to 1), each cycle covering
 * every test case in both random and non-random mode.
 *
 * NOTE(review): @buf is a __user pointer passed straight to sscanf();
 * should go through copy_from_user() — confirm.
 */
static ssize_t err_check_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;
	int j = 0;

	test_pr_info("%s: -- err_check TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_ERR_CHECK_GROUP;

	if (validate_packed_commands_settings())
		return count;

	if (mbtd->random_test_seed > 0)
		test_pr_info("%s: Test seed: %d", __func__,
			      mbtd->random_test_seed);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.post_test_fn = post_test;

	for (i = 0; i < number; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = ERR_CHECK_MIN_TESTCASE;
					j <= ERR_CHECK_MAX_TESTCASE ; j++) {
			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
2414
/*
 * Debugfs read handler: return a help text describing the err_check
 * test once per open (message_repeat gate).
 *
 * NOTE(review): writes directly to the __user @buffer; should use
 * simple_read_from_buffer()/copy_to_user() — confirm.
 */
static ssize_t err_check_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nerr_check_TEST\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Return ABORT\n"
		 "- Return PARTIAL followed by success\n"
		 "- Return PARTIAL followed by abort\n"
		 "- Return PARTIAL multiple times until success\n"
		 "- Return PARTIAL with fail index = threshold\n"
		 "- Return RETRY\n"
		 "- Return CMD_ERR\n"
		 "- Return DATA_ERR\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2443
/* debugfs fops for the err_check test node */
const struct file_operations err_check_test_ops = {
	.open = test_open,
	.write = err_check_test_write,
	.read = err_check_test_read,
};
2449
/* send_invalid_packed TEST */
/*
 * Debugfs write handler: run the send_invalid_packed test group the
 * requested number of times. Unlike the other groups, a failing case
 * does not abort the cycle: failures are counted and reported at the
 * end, and the overall result is set to TEST_FAILED if any occurred.
 *
 * NOTE(review): @buf is a __user pointer passed straight to sscanf();
 * should go through copy_from_user() — confirm.
 */
static ssize_t send_invalid_packed_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;
	int j = 0;
	int num_of_failures = 0;

	test_pr_info("%s: -- send_invalid_packed TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_SEND_INVALID_GROUP;

	if (validate_packed_commands_settings())
		return count;

	if (mbtd->random_test_seed > 0)
		test_pr_info("%s: Test seed: %d", __func__,
			      mbtd->random_test_seed);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.post_test_fn = post_test;

	for (i = 0; i < number; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = INVALID_CMD_MIN_TESTCASE;
				j <= INVALID_CMD_MAX_TESTCASE ; j++) {

			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				num_of_failures++;
			/* Allow FS requests to be dispatched */
			msleep(1000);

			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				num_of_failures++;
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	if (num_of_failures > 0) {
		test_iosched_set_test_result(TEST_FAILED);
		test_pr_err(
			"There were %d failures during the test, TEST FAILED",
			num_of_failures);
	}
	return count;
}
2521
/*
 * Debugfs read handler: return a help text describing the
 * send_invalid_packed test once per open (message_repeat gate).
 *
 * NOTE(review): writes directly to the __user @buffer; should use
 * simple_read_from_buffer()/copy_to_user() — confirm.
 */
static ssize_t send_invalid_packed_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nsend_invalid_packed_TEST\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Send an invalid header version\n"
		 "- Send the wrong write code\n"
		 "- Send an invalid R/W code\n"
		 "- Send wrong start address in header\n"
		 "- Send header with block_count smaller than actual\n"
		 "- Send header with block_count larger than actual\n"
		 "- Send header CMD23 packed bit set\n"
		 "- Send CMD23 with block count over threshold\n"
		 "- Send CMD23 with block_count equals zero\n"
		 "- Send CMD23 packed bit unset\n"
		 "- Send CMD23 reliable write bit set\n"
		 "- Send CMD23 bits [16-29] set\n"
		 "- Send CMD23 header block not in block_count\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2555
/* debugfs fops for the send_invalid_packed test node */
const struct file_operations send_invalid_packed_test_ops = {
	.open = test_open,
	.write = send_invalid_packed_test_write,
	.read = send_invalid_packed_test_read,
};
2561
/* packing_control TEST */
/*
 * Debugfs write handler: run the write packing-control test group the
 * requested number of times; a failing case aborts both the inner
 * test-case loop and the remaining cycles (via test_successful).
 *
 * NOTE(review): @buf is a __user pointer passed straight to sscanf();
 * should go through copy_from_user(). Also note queuedata/card are
 * dereferenced before validate_packed_commands_settings() — confirm
 * the queue is guaranteed to exist here.
 */
static ssize_t write_packing_control_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;
	int j = 0;
	struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
	int max_num_requests = mq->card->ext_csd.max_packed_writes;
	int test_successful = 1;

	test_pr_info("%s: -- write_packing_control TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;

	test_pr_info("%s: max_num_requests = %d ", __func__,
			max_num_requests);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));
	mbtd->test_group = TEST_PACKING_CONTROL_GROUP;

	if (validate_packed_commands_settings())
		return count;

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;

	for (i = 0; i < number; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = PACKING_CONTROL_MIN_TESTCASE;
				j <= PACKING_CONTROL_MAX_TESTCASE; j++) {

			test_successful = 1;
			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret) {
				test_successful = 0;
				break;
			}
			/* Allow FS requests to be dispatched */
			msleep(1000);

			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret) {
				test_successful = 0;
				break;
			}
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}

		if (!test_successful)
			break;
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
2634
/*
 * Debugfs read handler: return a help text describing the
 * write_packing_control test once per open (message_repeat gate).
 *
 * NOTE(review): writes directly to the __user @buffer; should use
 * simple_read_from_buffer()/copy_to_user() — confirm.
 */
static ssize_t write_packing_control_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nwrite_packing_control_test\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Packing expected - one over trigger\n"
		 "- Packing expected - N over trigger\n"
		 "- Packing expected - N over trigger followed by read\n"
		 "- Packing expected - N over trigger followed by flush\n"
		 "- Packing expected - threshold over trigger FB by flush\n"
		 "- Packing not expected - less than trigger\n"
		 "- Packing not expected - trigger requests\n"
		 "- Packing not expected - trigger, read, trigger\n"
		 "- Mixed state - packing -> no packing -> packing\n"
		 "- Mixed state - no packing -> packing -> no packing\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
2665
/* debugfs fops for the write_packing_control test node */
const struct file_operations write_packing_control_test_ops = {
	.open = test_open,
	.write = write_packing_control_test_write,
	.read = write_packing_control_test_read,
};
2671
/*
 * Debugfs write handler: run the write/discard/sanitize test the
 * requested number of times (written value; defaults to 1). Uses
 * prepare_write_discard_sanitize_read() as the prepare hook and the
 * sanitize-specific timeout.
 *
 * NOTE(review): @buf is a __user pointer passed straight to sscanf();
 * should go through copy_from_user() — confirm.
 */
static ssize_t write_discard_sanitize_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;

	sscanf(buf, "%d", &number);
	if (number <= 0)
		number = 1;

	test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_group = TEST_GENERAL_GROUP;

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;

	for (i = 0 ; i < number ; ++i) {
		test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
		test_pr_info("%s: ===================", __func__);

		mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
		ret = test_iosched_start_test(&mbtd->test_info);

		if (ret)
			break;
	}

	return count;
}
2709
/* debugfs fops for the write_discard_sanitize test node (write-only) */
const struct file_operations write_discard_sanitize_test_ops = {
	.open = test_open,
	.write = write_discard_sanitize_test_write,
};
2714
/*
 * Debugfs write handler: run the whole BKOPs test group the requested
 * number of times (written value; defaults to 1), iterating over all
 * BKOPs test cases with the BKOPs-specific prepare/run/check/post
 * hooks and timeout.
 *
 * NOTE(review): @buf is a __user pointer passed straight to sscanf();
 * should go through copy_from_user() — confirm.
 */
static ssize_t bkops_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0, j;
	int number = -1;

	test_pr_info("%s: -- bkops_test TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_BKOPS_GROUP;

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_bkops;
	mbtd->test_info.check_test_result_fn = check_bkops_result;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.run_test_fn = run_bkops;
	mbtd->test_info.timeout_msec = BKOPS_TEST_TIMEOUT;
	mbtd->test_info.post_test_fn = bkops_post_test;

	for (i = 0 ; i < number ; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ===================", __func__);
		for (j = BKOPS_MIN_TESTCASE ;
				j <= BKOPS_MAX_TESTCASE ; j++) {
			mbtd->test_info.testcase = j;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
2759
/*
 * Debugfs read handler: return a help text describing the BKOPs test.
 * Unlike the other *_read handlers this one is not gated by
 * message_repeat, so it returns the text on every read.
 *
 * NOTE(review): writes directly to the __user @buffer; should use
 * simple_read_from_buffer()/copy_to_user() — confirm.
 */
static ssize_t bkops_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nbkops_test\n========================\n"
		 "Description:\n"
		 "This test simulates BKOPS status from card\n"
		 "and verifies that:\n"
		 " - Starting BKOPS delayed work, level 1\n"
		 " - Starting BKOPS delayed work, level 1, with HPI\n"
		 " - Cancel starting BKOPS delayed work, "
		 " when a request is received\n"
		 " - Starting BKOPS urgent, level 2,3\n"
		 " - Starting BKOPS urgent with 2 requests\n");
	return strnlen(buffer, count);
}
2780
/* debugfs fops for the bkops test node */
const struct file_operations bkops_test_ops = {
	.open = test_open,
	.write = bkops_test_write,
	.read = bkops_test_read,
};
2786
/*
 * Debugfs write handler: run the long sequential read test the
 * requested number of times (written value; defaults to 1). After each
 * run, computes and logs the read throughput in MiB/sec with one digit
 * of fractional precision from the measured test duration.
 *
 * NOTE(review): @buf is a __user pointer passed straight to sscanf();
 * should go through copy_from_user() — confirm.
 */
static ssize_t long_sequential_read_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;
	unsigned int mtime, integer, fraction;

	test_pr_info("%s: -- Long Sequential Read TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;

	memset(&mbtd->test_info, 0, sizeof(struct test_info));
	mbtd->test_group = TEST_GENERAL_GROUP;

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;

	for (i = 0 ; i < number ; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_READ;
		mbtd->is_random = NON_RANDOM_TEST;
		ret = test_iosched_start_test(&mbtd->test_info);
		if (ret)
			break;

		mtime = jiffies_to_msecs(mbtd->test_info.test_duration);

		test_pr_info("%s: time is %u msec, size is %u.%u MiB",
			__func__, mtime, LONG_TEST_SIZE_INTEGER,
			      LONG_TEST_SIZE_FRACTION);

		/* we first multiply in order not to lose precision */
		mtime *= MB_MSEC_RATIO_APPROXIMATION;
		/* divide values to get a MiB/sec integer value with one
		   digit of precision. Multiply by 10 for one digit precision
		 */
		fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
		integer /= 10;
		/* and calculate the MiB value fraction */
		fraction -= integer * 10;

		test_pr_info("%s: Throughput: %u.%u MiB/sec\n"
			, __func__, integer, fraction);

		/* Allow FS requests to be dispatched */
		msleep(1000);
	}

	return count;
}
2846
2847static ssize_t long_sequential_read_test_read(struct file *file,
2848 char __user *buffer,
2849 size_t count,
2850 loff_t *offset)
2851{
2852 memset((void *)buffer, 0, count);
2853
2854 snprintf(buffer, count,
2855 "\nlong_sequential_read_test\n"
2856 "=========\n"
2857 "Description:\n"
2858 "This test runs the following scenarios\n"
2859 "- Long Sequential Read Test: this test measures read "
2860 "throughput at the driver level by sequentially reading many "
2861 "large requests.\n");
2862
2863 if (message_repeat == 1) {
2864 message_repeat = 0;
2865 return strnlen(buffer, count);
2866 } else
2867 return 0;
2868}
2869
/*
 * debugfs hooks for the "long_sequential_read_test" node: write runs
 * the test, read returns the help text.
 */
const struct file_operations long_sequential_read_test_ops = {
	.open = test_open,
	.write = long_sequential_read_test_write,
	.read = long_sequential_read_test_read,
};
2875
Lee Susmana35ae6e2012-10-25 16:06:07 +02002876static ssize_t long_sequential_write_test_write(struct file *file,
2877 const char __user *buf,
2878 size_t count,
2879 loff_t *ppos)
2880{
2881 int ret = 0;
2882 int i = 0;
2883 int number = -1;
2884 unsigned int mtime, integer, fraction;
2885
2886 test_pr_info("%s: -- Long Sequential Write TEST --", __func__);
2887
2888 sscanf(buf, "%d", &number);
2889
2890 if (number <= 0)
2891 number = 1;
2892
2893 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2894 mbtd->test_group = TEST_GENERAL_GROUP;
2895
2896 mbtd->test_info.data = mbtd;
2897 mbtd->test_info.prepare_test_fn = prepare_test;
2898 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2899
2900 for (i = 0 ; i < number ; ++i) {
2901 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2902 test_pr_info("%s: ====================", __func__);
2903
2904 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_WRITE;
2905 mbtd->is_random = NON_RANDOM_TEST;
2906 ret = test_iosched_start_test(&mbtd->test_info);
2907 if (ret)
2908 break;
2909
2910 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2911
2912 test_pr_info("%s: time is %u msec, size is %u.%u MiB",
2913 __func__, mtime, LONG_TEST_SIZE_INTEGER,
2914 LONG_TEST_SIZE_FRACTION);
2915
2916 /* we first multiply in order not to lose precision */
2917 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2918 /* divide values to get a MiB/sec integer value with one
2919 digit of precision
2920 */
2921 fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
2922 integer /= 10;
2923 /* and calculate the MiB value fraction */
2924 fraction -= integer * 10;
2925
2926 test_pr_info("%s: Throughput: %u.%u MiB/sec\n",
2927 __func__, integer, fraction);
2928
2929 /* Allow FS requests to be dispatched */
2930 msleep(1000);
2931 }
2932
2933 return count;
2934}
2935
2936static ssize_t long_sequential_write_test_read(struct file *file,
2937 char __user *buffer,
2938 size_t count,
2939 loff_t *offset)
2940{
2941 memset((void *)buffer, 0, count);
2942
2943 snprintf(buffer, count,
2944 "\nlong_sequential_write_test\n"
2945 "=========\n"
2946 "Description:\n"
2947 "This test runs the following scenarios\n"
2948 "- Long Sequential Write Test: this test measures write "
2949 "throughput at the driver level by sequentially writing many "
2950 "large requests\n");
2951
2952 if (message_repeat == 1) {
2953 message_repeat = 0;
2954 return strnlen(buffer, count);
2955 } else
2956 return 0;
2957}
2958
/*
 * debugfs hooks for the "long_sequential_write_test" node: write runs
 * the test, read returns the help text.
 */
const struct file_operations long_sequential_write_test_ops = {
	.open = test_open,
	.write = long_sequential_write_test_write,
	.read = long_sequential_write_test_read,
};
2964
Lee Susmanb09c0412012-12-19 14:28:52 +02002965static ssize_t new_req_notification_test_write(struct file *file,
2966 const char __user *buf,
2967 size_t count,
2968 loff_t *ppos)
2969{
2970 int ret = 0;
2971 int i = 0;
2972 int number = -1;
2973
2974 test_pr_info("%s: -- new_req_notification TEST --", __func__);
2975
2976 sscanf(buf, "%d", &number);
2977
2978 if (number <= 0)
2979 number = 1;
2980
2981 mbtd->test_group = TEST_NEW_NOTIFICATION_GROUP;
2982
2983 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2984
2985 mbtd->test_info.data = mbtd;
2986 mbtd->test_info.prepare_test_fn = prepare_new_req;
2987 mbtd->test_info.check_test_result_fn = check_new_req_result;
2988 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2989 mbtd->test_info.run_test_fn = run_new_req;
2990 mbtd->test_info.timeout_msec = 10 * 60 * 1000; /* 1 min */
2991 mbtd->test_info.post_test_fn = new_req_post_test;
2992
2993 for (i = 0 ; i < number ; ++i) {
2994 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2995 test_pr_info("%s: ===================", __func__);
2996 test_pr_info("%s: start test case TEST_NEW_REQ_NOTIFICATION",
2997 __func__);
2998 mbtd->test_info.testcase = TEST_NEW_REQ_NOTIFICATION;
2999 ret = test_iosched_start_test(&mbtd->test_info);
3000 if (ret) {
3001 test_pr_info("%s: break from new_req tests loop",
3002 __func__);
3003 break;
3004 }
3005 }
3006 return count;
3007}
3008
3009static ssize_t new_req_notification_test_read(struct file *file,
3010 char __user *buffer,
3011 size_t count,
3012 loff_t *offset)
3013{
3014 memset((void *)buffer, 0, count);
3015
3016 snprintf(buffer, count,
3017 "\nnew_req_notification_test\n========================\n"
3018 "Description:\n"
3019 "This test checks following scenarious\n"
3020 "- new request arrives after a NULL request was sent to the "
3021 "mmc_queue,\n"
3022 "which is waiting for completion of a former request\n");
3023
3024 return strnlen(buffer, count);
3025}
3026
/*
 * debugfs hooks for the "new_req_notification_test" node: write runs
 * the test, read returns the help text.
 */
const struct file_operations new_req_notification_test_ops = {
	.open = test_open,
	.write = new_req_notification_test_write,
	.read = new_req_notification_test_read,
};
Lee Susmana35ae6e2012-10-25 16:06:07 +02003032
/*
 * mmc_block_test_debugfs_cleanup() - remove every debugfs node this
 * module creates.
 *
 * Safe to call on a partially-initialized mbtd: the structure is
 * zero-allocated, so any node that was never created is NULL and
 * debugfs_remove(NULL) is a no-op.
 */
static void mmc_block_test_debugfs_cleanup(void)
{
	debugfs_remove(mbtd->debug.random_test_seed);
	debugfs_remove(mbtd->debug.send_write_packing_test);
	debugfs_remove(mbtd->debug.err_check_test);
	debugfs_remove(mbtd->debug.send_invalid_packed_test);
	debugfs_remove(mbtd->debug.packing_control_test);
	debugfs_remove(mbtd->debug.discard_sanitize_test);
	debugfs_remove(mbtd->debug.bkops_test);
	debugfs_remove(mbtd->debug.long_sequential_read_test);
	debugfs_remove(mbtd->debug.long_sequential_write_test);
	debugfs_remove(mbtd->debug.new_req_notification_test);
}
3046
3047static int mmc_block_test_debugfs_init(void)
3048{
3049 struct dentry *utils_root, *tests_root;
3050
3051 utils_root = test_iosched_get_debugfs_utils_root();
3052 tests_root = test_iosched_get_debugfs_tests_root();
3053
3054 if (!utils_root || !tests_root)
3055 return -EINVAL;
3056
3057 mbtd->debug.random_test_seed = debugfs_create_u32(
3058 "random_test_seed",
3059 S_IRUGO | S_IWUGO,
3060 utils_root,
3061 &mbtd->random_test_seed);
3062
3063 if (!mbtd->debug.random_test_seed)
3064 goto err_nomem;
3065
3066 mbtd->debug.send_write_packing_test =
3067 debugfs_create_file("send_write_packing_test",
3068 S_IRUGO | S_IWUGO,
3069 tests_root,
3070 NULL,
3071 &send_write_packing_test_ops);
3072
3073 if (!mbtd->debug.send_write_packing_test)
3074 goto err_nomem;
3075
3076 mbtd->debug.err_check_test =
3077 debugfs_create_file("err_check_test",
3078 S_IRUGO | S_IWUGO,
3079 tests_root,
3080 NULL,
3081 &err_check_test_ops);
3082
3083 if (!mbtd->debug.err_check_test)
3084 goto err_nomem;
3085
3086 mbtd->debug.send_invalid_packed_test =
3087 debugfs_create_file("send_invalid_packed_test",
3088 S_IRUGO | S_IWUGO,
3089 tests_root,
3090 NULL,
3091 &send_invalid_packed_test_ops);
3092
3093 if (!mbtd->debug.send_invalid_packed_test)
3094 goto err_nomem;
3095
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02003096 mbtd->debug.packing_control_test = debugfs_create_file(
3097 "packing_control_test",
3098 S_IRUGO | S_IWUGO,
3099 tests_root,
3100 NULL,
3101 &write_packing_control_test_ops);
3102
3103 if (!mbtd->debug.packing_control_test)
3104 goto err_nomem;
3105
Maya Erezddc55732012-10-17 09:51:01 +02003106 mbtd->debug.discard_sanitize_test =
3107 debugfs_create_file("write_discard_sanitize_test",
3108 S_IRUGO | S_IWUGO,
3109 tests_root,
3110 NULL,
3111 &write_discard_sanitize_test_ops);
3112 if (!mbtd->debug.discard_sanitize_test) {
3113 mmc_block_test_debugfs_cleanup();
3114 return -ENOMEM;
3115 }
3116
Yaniv Gardie9214c82012-10-18 13:58:18 +02003117 mbtd->debug.bkops_test =
3118 debugfs_create_file("bkops_test",
3119 S_IRUGO | S_IWUGO,
3120 tests_root,
3121 NULL,
3122 &bkops_test_ops);
3123
Lee Susmanb09c0412012-12-19 14:28:52 +02003124 mbtd->debug.new_req_notification_test =
3125 debugfs_create_file("new_req_notification_test",
3126 S_IRUGO | S_IWUGO,
3127 tests_root,
3128 NULL,
3129 &new_req_notification_test_ops);
3130
3131 if (!mbtd->debug.new_req_notification_test)
3132 goto err_nomem;
3133
Yaniv Gardie9214c82012-10-18 13:58:18 +02003134 if (!mbtd->debug.bkops_test)
3135 goto err_nomem;
3136
Lee Susmanf18263a2012-10-24 14:14:37 +02003137 mbtd->debug.long_sequential_read_test = debugfs_create_file(
3138 "long_sequential_read_test",
3139 S_IRUGO | S_IWUGO,
3140 tests_root,
3141 NULL,
3142 &long_sequential_read_test_ops);
3143
3144 if (!mbtd->debug.long_sequential_read_test)
3145 goto err_nomem;
3146
Lee Susmana35ae6e2012-10-25 16:06:07 +02003147 mbtd->debug.long_sequential_write_test = debugfs_create_file(
3148 "long_sequential_write_test",
3149 S_IRUGO | S_IWUGO,
3150 tests_root,
3151 NULL,
3152 &long_sequential_write_test_ops);
3153
3154 if (!mbtd->debug.long_sequential_write_test)
3155 goto err_nomem;
3156
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02003157 return 0;
3158
3159err_nomem:
3160 mmc_block_test_debugfs_cleanup();
3161 return -ENOMEM;
3162}
3163
3164static void mmc_block_test_probe(void)
3165{
3166 struct request_queue *q = test_iosched_get_req_queue();
3167 struct mmc_queue *mq;
3168 int max_packed_reqs;
3169
3170 if (!q) {
3171 test_pr_err("%s: NULL request queue", __func__);
3172 return;
3173 }
3174
3175 mq = q->queuedata;
3176 if (!mq) {
3177 test_pr_err("%s: NULL mq", __func__);
3178 return;
3179 }
3180
3181 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
3182 mbtd->exp_packed_stats.packing_events =
3183 kzalloc((max_packed_reqs + 1) *
3184 sizeof(*mbtd->exp_packed_stats.packing_events),
3185 GFP_KERNEL);
3186
3187 mmc_block_test_debugfs_init();
3188}
3189
/*
 * mmc_block_test_remove() - test-iosched exit hook; tears down the
 * debugfs nodes created by mmc_block_test_probe().
 */
static void mmc_block_test_remove(void)
{
	mmc_block_test_debugfs_cleanup();
}
3194
3195static int __init mmc_block_test_init(void)
3196{
3197 mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
3198 if (!mbtd) {
3199 test_pr_err("%s: failed to allocate mmc_block_test_data",
3200 __func__);
3201 return -ENODEV;
3202 }
3203
Yaniv Gardie9214c82012-10-18 13:58:18 +02003204 init_waitqueue_head(&mbtd->bkops_wait_q);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02003205 mbtd->bdt.init_fn = mmc_block_test_probe;
3206 mbtd->bdt.exit_fn = mmc_block_test_remove;
3207 INIT_LIST_HEAD(&mbtd->bdt.list);
3208 test_iosched_register(&mbtd->bdt);
3209
3210 return 0;
3211}
3212
/*
 * mmc_block_test_exit() - module exit point; unregisters from the
 * test-iosched framework and frees the global test data.
 */
static void __exit mmc_block_test_exit(void)
{
	test_iosched_unregister(&mbtd->bdt);
	kfree(mbtd);
}
3218
/* Module registration and metadata. */
module_init(mmc_block_test_init);
module_exit(mmc_block_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MMC block test");