blob: fbbfbec44bd3e9c52a37b2850d54a263ad1321c3 [file] [log] [blame]
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/* MMC block test */
15
16#include <linux/module.h>
17#include <linux/blkdev.h>
18#include <linux/debugfs.h>
19#include <linux/mmc/card.h>
20#include <linux/mmc/host.h>
21#include <linux/delay.h>
22#include <linux/test-iosched.h>
Lee Susmanf18263a2012-10-24 14:14:37 +020023#include <linux/jiffies.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020024#include "queue.h"
Yaniv Gardie9214c82012-10-18 13:58:18 +020025#include <linux/mmc/mmc.h>
Tatyana Brokhman09b010d2012-10-09 13:50:56 +020026
#define MODULE_NAME "mmc_block_test"
/* Upper bound used when choosing test sector addresses */
#define TEST_MAX_SECTOR_RANGE		(600*1024*1024) /* 600 MB */
#define TEST_MAX_BIOS_PER_REQ		120
/* Bit 30 of the CMD23 argument marks the command as packed */
#define CMD23_PACKED_BIT	(1 << 30)
/* Large constants used to scramble the pseudo-random test seed */
#define LARGE_PRIME_1	1103515367
#define LARGE_PRIME_2	35757
/* Field masks for word 0 of the packed write header */
#define PACKED_HDR_VER_MASK 0x000000FF
#define PACKED_HDR_RW_MASK 0x0000FF00
#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
/* Bits [16-29] set; used to corrupt the reserved CMD23 argument bits */
#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
#define SECTOR_SIZE 512
#define NUM_OF_SECTORS_PER_BIO		((BIO_U32_SIZE * 4) / SECTOR_SIZE)
#define BIO_TO_SECTOR(x)		(x * NUM_OF_SECTORS_PER_BIO)
/* the desired long test size to be written or read */
#define LONG_TEST_MAX_NUM_BYTES (50*1024*1024) /* 50MB */
/* request queue limitation is 128 requests, and we leave 10 spare requests */
#define TEST_MAX_REQUESTS 118
#define LONG_TEST_MAX_NUM_REQS	(LONG_TEST_MAX_NUM_BYTES / \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* this doesn't allow the test requests num to be greater than the maximum */
#define LONG_TEST_ACTUAL_NUM_REQS  \
			((TEST_MAX_REQUESTS < LONG_TEST_MAX_NUM_REQS) ? \
				TEST_MAX_REQUESTS : LONG_TEST_MAX_NUM_REQS)
/* bytes-per-msec at a throughput of 1 MB/s, used for rate reporting */
#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
/* actual number of bytes in test */
#define LONG_TEST_ACTUAL_BYTE_NUM  (LONG_TEST_ACTUAL_NUM_REQS * \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* actual number of MiB in test multiplied by 10, for single digit precision*/
#define LONG_TEST_ACTUAL_MB_NUM_X_10 ((LONG_TEST_ACTUAL_BYTE_NUM * 10) / \
		(1024 * 1024))
/* extract integer value */
#define LONG_TEST_SIZE_INTEGER (LONG_TEST_ACTUAL_MB_NUM_X_10 / 10)
/* and calculate the MiB value fraction */
#define LONG_TEST_SIZE_FRACTION (LONG_TEST_ACTUAL_MB_NUM_X_10 - \
		(LONG_TEST_SIZE_INTEGER * 10))

/* Logging helpers: every message is prefixed with the module name */
#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)

/* Timeout for the sanitize test; presumably msec - TODO confirm units */
#define SANITIZE_TEST_TIMEOUT 240000
#define NEW_REQ_TEST_SLEEP_TIME 1
#define NEW_REQ_TEST_NUM_BIOS 64
#define TEST_REQUEST_NUM_OF_BIOS	3

/* Evaluates non-zero when any BKOPs counter differs from its expected value */
#define CHECK_BKOPS_STATS(stats, exp_bkops, exp_hpi, exp_suspend)	\
				   ((stats.bkops != exp_bkops) ||	\
				    (stats.hpi != exp_hpi) ||		\
				    (stats.suspend != exp_suspend))
/* Timeout for BKOPs tests; presumably msec - TODO confirm units */
#define BKOPS_TEST_TIMEOUT 60000
Maya Erezddc55732012-10-17 09:51:01 +020077
/* Whether a testcase runs with random or fixed request/bio counts */
enum is_random {
	NON_RANDOM_TEST,
	RANDOM_TEST,
};
82
/*
 * All testcases supported by this module. Testcases are grouped; each
 * group is delimited by a MIN/MAX pair so group membership can be
 * checked with a simple range comparison.
 */
enum mmc_block_test_testcases {
	/* Start of send write packing test group */
	SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_FLUSH,
	TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_EMPTY_QUEUE,
	TEST_STOP_DUE_TO_MAX_REQ_NUM,
	TEST_STOP_DUE_TO_THRESHOLD,
	SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,

	/* Start of err check test group */
	ERR_CHECK_MIN_TESTCASE,
	TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
	TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
	TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
	TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
	TEST_RET_PARTIAL_MAX_FAIL_IDX,
	TEST_RET_RETRY,
	TEST_RET_CMD_ERR,
	TEST_RET_DATA_ERR,
	ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,

	/* Start of send invalid test group */
	INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_WRONG_WRITE_CODE,
	TEST_HDR_INVALID_RW_CODE,
	TEST_HDR_DIFFERENT_ADDRESSES,
	TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
	TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
	TEST_HDR_CMD23_PACKED_BIT_SET,
	TEST_CMD23_MAX_PACKED_WRITES,
	TEST_CMD23_ZERO_PACKED_WRITES,
	TEST_CMD23_PACKED_BIT_UNSET,
	TEST_CMD23_REL_WR_BIT_SET,
	TEST_CMD23_BITS_16TO29_SET,
	TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
	INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,

	/*
	 * Start of packing control test group.
	 * in these next testcases the abbreviation FB = followed by
	 */
	PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
				PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_N_OVER_TRIGGER,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
	TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
	TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
	TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
	TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
	TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
	PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,

	TEST_WRITE_DISCARD_SANITIZE_READ,

	/* Start of bkops test group */
	BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1 = BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1_HPI,
	BKOPS_CANCEL_DELAYED_WORK,
	BKOPS_URGENT_LEVEL_2,
	BKOPS_URGENT_LEVEL_2_TWO_REQS,
	BKOPS_URGENT_LEVEL_3,
	BKOPS_MAX_TESTCASE = BKOPS_URGENT_LEVEL_3,

	TEST_LONG_SEQUENTIAL_READ,
	TEST_LONG_SEQUENTIAL_WRITE,

	TEST_NEW_REQ_NOTIFICATION,
};
160
/* Coarse grouping of the testcases above; stored in mbtd->test_group */
enum mmc_block_test_group {
	TEST_NO_GROUP,
	TEST_GENERAL_GROUP,
	TEST_SEND_WRITE_PACKING_GROUP,
	TEST_ERR_CHECK_GROUP,
	TEST_SEND_INVALID_GROUP,
	TEST_PACKING_CONTROL_GROUP,
	TEST_BKOPS_GROUP,
	TEST_NEW_NOTIFICATION_GROUP,
};
171
/* Progress marker for the multi-stage BKOPs tests (see mbtd->bkops_stage) */
enum bkops_test_stages {
	BKOPS_STAGE_1,
	BKOPS_STAGE_2,
	BKOPS_STAGE_3,
	BKOPS_STAGE_4,
};
178
/* debugfs dentries, one file per user-triggerable test */
struct mmc_block_test_debug {
	struct dentry *send_write_packing_test;
	struct dentry *err_check_test;
	struct dentry *send_invalid_packed_test;
	/* Seed file shared by all tests that use randomized parameters */
	struct dentry *random_test_seed;
	struct dentry *packing_control_test;
	struct dentry *discard_sanitize_test;
	struct dentry *bkops_test;
	struct dentry *long_sequential_read_test;
	struct dentry *long_sequential_write_test;
	struct dentry *new_req_notification_test;
};
191
/* Shared state for the whole test module (singleton, see mbtd below) */
struct mmc_block_test_data {
	/* The number of write requests that the test will issue */
	int num_requests;
	/* The expected write packing statistics for the current test */
	struct mmc_wr_pack_stats exp_packed_stats;
	/*
	 * A user-defined seed for random choices of number of bios written in
	 * a request, and of number of requests issued in a test
	 * This field is randomly updated after each use
	 */
	unsigned int random_test_seed;
	/* A retry counter used in err_check tests */
	int err_check_counter;
	/* Can be one of the values of enum test_group */
	enum mmc_block_test_group test_group;
	/*
	 * Indicates if the current testcase is running with random values of
	 * num_requests and num_bios (in each request)
	 */
	int is_random;
	/* Data structure for debugfs dentrys */
	struct mmc_block_test_debug debug;
	/*
	 * Data structure containing individual test information, including
	 * self-defined specific data
	 */
	struct test_info test_info;
	/* mmc block device test */
	struct blk_dev_test_type bdt;
	/* Current BKOPs test stage */
	enum bkops_test_stages bkops_stage;
	/* A wait queue for BKOPs tests */
	wait_queue_head_t bkops_wait_q;

	/* presumably counts completed requests in the new-req test - verify */
	unsigned int completed_req_count;
};
228
/* Module-wide test context; referenced by every callback in this file */
static struct mmc_block_test_data *mbtd;
230
Lee Susmane868f8a2012-11-04 15:04:41 +0200231void print_mmc_packing_stats(struct mmc_card *card)
232{
233 int i;
234 int max_num_of_packed_reqs = 0;
235
236 if ((!card) || (!card->wr_pack_stats.packing_events))
237 return;
238
239 max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
240
241 spin_lock(&card->wr_pack_stats.lock);
242
243 pr_info("%s: write packing statistics:\n",
244 mmc_hostname(card->host));
245
246 for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
247 if (card->wr_pack_stats.packing_events[i] != 0)
248 pr_info("%s: Packed %d reqs - %d times\n",
249 mmc_hostname(card->host), i,
250 card->wr_pack_stats.packing_events[i]);
251 }
252
253 pr_info("%s: stopped packing due to the following reasons:\n",
254 mmc_hostname(card->host));
255
256 if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
257 pr_info("%s: %d times: exceedmax num of segments\n",
258 mmc_hostname(card->host),
259 card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
260 if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
261 pr_info("%s: %d times: exceeding the max num of sectors\n",
262 mmc_hostname(card->host),
263 card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
264 if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
265 pr_info("%s: %d times: wrong data direction\n",
266 mmc_hostname(card->host),
267 card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
268 if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
269 pr_info("%s: %d times: flush or discard\n",
270 mmc_hostname(card->host),
271 card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
272 if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
273 pr_info("%s: %d times: empty queue\n",
274 mmc_hostname(card->host),
275 card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
276 if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
277 pr_info("%s: %d times: rel write\n",
278 mmc_hostname(card->host),
279 card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
280 if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
281 pr_info("%s: %d times: Threshold\n",
282 mmc_hostname(card->host),
283 card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
284
285 spin_unlock(&card->wr_pack_stats.lock);
286}
287
Lee Susman66842b02012-12-19 14:28:03 +0200288/**
289 * mmc_print_async_event_stats() - Print async event statistics
290 * @card: The mmc_card in which the async_event_stats
291 * struct is a member
292 */
293void mmc_print_async_event_stats(struct mmc_card *card)
294{
295 struct mmc_async_event_stats *s;
296
297 if (!card)
298 return;
299
300 s = &card->async_event_stats;
301 if (!s)
302 return;
303
304 pr_info("%s: new notification & req statistics:\n",
305 mmc_hostname(card->host));
306 pr_info("%s: done_flag:%d", mmc_hostname(card->host),
307 s->done_flag);
308 pr_info("%s: cmd_retry:%d", mmc_hostname(card->host),
309 s->cmd_retry);
310 pr_info("%s: NULL fetched:%d", mmc_hostname(card->host),
311 s->null_fetched);
312 pr_info("%s: wake up new:%d", mmc_hostname(card->host),
313 s->wakeup_new);
314 pr_info("%s: new_request_flag:%d", mmc_hostname(card->host),
315 s->new_request_flag);
316 pr_info("%s: no waiting:%d\n", mmc_hostname(card->host),
317 s->q_no_waiting);
318 pr_info("%s: no_mmc_request_action:%d", mmc_hostname(card->host),
319 s->no_mmc_request_action);
320 pr_info("%s: wakeup_mq_thread:%d", mmc_hostname(card->host),
321 s->wakeup_mq_thread);
322 pr_info("%s: fetch_due_to_new_req:%d", mmc_hostname(card->host),
323 s->fetch_due_to_new_req);
324 pr_info("%s: returned_new_req:%d", mmc_hostname(card->host),
325 s->returned_new_req);
326 pr_info("%s: done_when_new_req_event_on:%d", mmc_hostname(card->host),
327 s->done_when_new_req_event_on);
328 pr_info("%s: new_req_when_new_marked:%d", mmc_hostname(card->host),
329 s->new_req_when_new_marked);
330}
331
/*
 * A callback assigned to the packed_test_fn field.
 * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
 * Here we alter the packed header or CMD23 in order to send an invalid
 * packed command to the card, according to the current testcase. The
 * corruption is applied either to word 0 of the packed header
 * (version / R/W code / request count fields), to the per-request
 * CMD23 arguments inside the header, or to the brq's own sbc (CMD23)
 * argument.
 */
static void test_invalid_packed_cmd(struct request_queue *q,
				    struct mmc_queue_req *mqrq)
{
	struct mmc_queue *mq = q->queuedata;
	u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
	struct request *req = mqrq->req;
	struct request *second_rq;
	struct test_request *test_rq;
	struct mmc_blk_request *brq = &mqrq->brq;
	int num_requests;
	int max_packed_reqs;

	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return;
	}

	/* The test_request is stashed in the request's elevator private data */
	test_rq = (struct test_request *)req->elv.priv[0];
	if (!test_rq) {
		test_pr_err("%s: NULL test_rq", __func__);
		return;
	}
	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	switch (mbtd->test_info.testcase) {
	case TEST_HDR_INVALID_VERSION:
		test_pr_info("%s: set invalid header version", __func__);
		/* Put 0 in header version field (1 byte, offset 0 in header) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
		break;
	case TEST_HDR_WRONG_WRITE_CODE:
		test_pr_info("%s: wrong write code", __func__);
		/* Set R/W field with R value (1 byte, offset 1 in header) */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
		break;
	case TEST_HDR_INVALID_RW_CODE:
		test_pr_info("%s: invalid r/w code", __func__);
		/* Set R/W field with invalid value */
		packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
		packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
		break;
	case TEST_HDR_DIFFERENT_ADDRESSES:
		test_pr_info("%s: different addresses", __func__);
		second_rq = list_entry(req->queuelist.next, struct request,
				queuelist);
		test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
			      __func__, (long)req->__sector,
			     (long)second_rq->__sector);
		/*
		 * Put start sector of second write request in the first write
		 * request's cmd25 argument in the packed header
		 */
		packed_cmd_hdr[3] = second_rq->__sector;
		break;
	case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
		test_pr_info("%s: request num smaller than actual" , __func__);
		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
									>> 16;
		/* num of entries is decremented by 1 */
		num_requests = (num_requests - 1) << 16;
		/*
		 * Set number of requests field in packed write header to be
		 * smaller than the actual number (1 byte, offset 2 in header)
		 */
		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
		break;
	case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
		test_pr_info("%s: request num larger than actual" , __func__);
		num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
									>> 16;
		/* num of entries is incremented by 1 */
		num_requests = (num_requests + 1) << 16;
		/*
		 * Set number of requests field in packed write header to be
		 * larger than the actual number (1 byte, offset 2 in header).
		 */
		packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
				     ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
		break;
	case TEST_HDR_CMD23_PACKED_BIT_SET:
		test_pr_info("%s: header CMD23 packed bit set" , __func__);
		/*
		 * Set packed bit (bit 30) in cmd23 argument of first and second
		 * write requests in packed write header.
		 * These are located at bytes 2 and 4 in packed write header
		 */
		packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
		packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
		break;
	case TEST_CMD23_MAX_PACKED_WRITES:
		test_pr_info("%s: CMD23 request num > max_packed_reqs",
			      __func__);
		/*
		 * Set the individual packed cmd23 request num to
		 * max_packed_reqs + 1
		 */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
		break;
	case TEST_CMD23_ZERO_PACKED_WRITES:
		test_pr_info("%s: CMD23 request num = 0", __func__);
		/* Set the individual packed cmd23 request num to zero */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED;
		break;
	case TEST_CMD23_PACKED_BIT_UNSET:
		test_pr_info("%s: CMD23 packed bit unset", __func__);
		/*
		 * Set the individual packed cmd23 packed bit to 0,
		 *  although there is a packed write request
		 */
		brq->sbc.arg &= ~CMD23_PACKED_BIT;
		break;
	case TEST_CMD23_REL_WR_BIT_SET:
		test_pr_info("%s: CMD23 REL WR bit set", __func__);
		/* Set the individual packed cmd23 reliable write bit */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
		break;
	case TEST_CMD23_BITS_16TO29_SET:
		test_pr_info("%s: CMD23 bits [16-29] set", __func__);
		/* Corrupt the reserved bits of the CMD23 argument */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
			PACKED_HDR_BITS_16_TO_29_SET;
		break;
	case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
		test_pr_info("%s: CMD23 hdr not in block count", __func__);
		/* Block count excludes the header block for writes */
		brq->sbc.arg = MMC_CMD23_ARG_PACKED |
		((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
		break;
	}
}
472
/*
 * A callback assigned to the err_check_fn field of the mmc_request by the
 * MMC/card/block layer.
 * Called upon request completion by the MMC/core layer.
 * Here we emulate an error return value from the card: depending on the
 * current testcase we return an MMC_BLK_* status and/or set
 * packed_fail_idx to make the block layer believe a packed command
 * partially failed. Several cases use mbtd->err_check_counter to change
 * behavior across successive invocations, and clear mq->err_check_fn
 * once the injection sequence is complete.
 */
static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
			mmc_active);
	struct request_queue *req_q = test_iosched_get_req_queue();
	struct mmc_queue *mq;
	int max_packed_reqs;
	int ret = 0;
	struct mmc_blk_request *brq;

	if (req_q)
		mq = req_q->queuedata;
	else {
		test_pr_err("%s: NULL request_queue", __func__);
		return 0;
	}

	if (!mq) {
		test_pr_err("%s: %s: NULL mq", __func__,
			mmc_hostname(card->host));
		return 0;
	}

	max_packed_reqs = mq->card->ext_csd.max_packed_writes;

	if (!mq_rq) {
		test_pr_err("%s: %s: NULL mq_rq", __func__,
			mmc_hostname(card->host));
		return 0;
	}
	brq = &mq_rq->brq;

	switch (mbtd->test_info.testcase) {
	case TEST_RET_ABORT:
		test_pr_info("%s: return abort", __func__);
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
		test_pr_info("%s: return partial followed by success",
			      __func__);
		/*
		 * Since in this testcase num_requests is always >= 2,
		 * we can be sure that packed_fail_idx is always >= 1
		 */
		mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
		test_pr_info("%s: packed_fail_idx = %d"
			, __func__, mq_rq->packed_fail_idx);
		/* Inject only once: the retry must then succeed */
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
		if (!mbtd->err_check_counter) {
			/* First invocation: report a partial failure */
			test_pr_info("%s: return partial followed by abort",
				      __func__);
			mbtd->err_check_counter++;
			/*
			 * Since in this testcase num_requests is always >= 3,
			 * we have that packed_fail_idx is always >= 1
			 */
			mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
			test_pr_info("%s: packed_fail_idx = %d"
				, __func__, mq_rq->packed_fail_idx);
			ret = MMC_BLK_PARTIAL;
			break;
		}
		/* Second invocation: abort and stop injecting */
		mbtd->err_check_counter = 0;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_ABORT;
		break;
	case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
		test_pr_info("%s: return partial multiple until success",
			     __func__);
		/* Keep failing index 1 until num_requests invocations passed */
		if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
			mq->err_check_fn = NULL;
			mbtd->err_check_counter = 0;
			ret = MMC_BLK_PARTIAL;
			break;
		}
		mq_rq->packed_fail_idx = 1;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_PARTIAL_MAX_FAIL_IDX:
		test_pr_info("%s: return partial max fail_idx", __func__);
		mq_rq->packed_fail_idx = max_packed_reqs - 1;
		mq->err_check_fn = NULL;
		ret = MMC_BLK_PARTIAL;
		break;
	case TEST_RET_RETRY:
		test_pr_info("%s: return retry", __func__);
		ret = MMC_BLK_RETRY;
		break;
	case TEST_RET_CMD_ERR:
		test_pr_info("%s: return cmd err", __func__);
		ret = MMC_BLK_CMD_ERR;
		break;
	case TEST_RET_DATA_ERR:
		test_pr_info("%s: return data err", __func__);
		ret = MMC_BLK_DATA_ERR;
		break;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* On the first completion only, fake an exception event bit */
		if (mbtd->err_check_counter++ == 0) {
			test_pr_info("%s: simulate an exception from the card",
				     __func__);
			brq->cmd.resp[0] |= R1_EXCEPTION_EVENT;
		}
		mq->err_check_fn = NULL;
		break;
	default:
		test_pr_err("%s: unexpected testcase %d",
			__func__, mbtd->test_info.testcase);
	}

	return ret;
}
595
596/*
597 * This is a specific implementation for the get_test_case_str_fn function
598 * pointer in the test_info data structure. Given a valid test_data instance,
599 * the function returns a string resembling the test name, based on the testcase
600 */
601static char *get_test_case_str(struct test_data *td)
602{
603 if (!td) {
604 test_pr_err("%s: NULL td", __func__);
605 return NULL;
606 }
607
Lee Susman039ce092012-11-15 13:36:15 +0200608switch (td->test_info.testcase) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200609 case TEST_STOP_DUE_TO_FLUSH:
Lee Susman039ce092012-11-15 13:36:15 +0200610 return "\"stop due to flush\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200611 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200612 return "\"stop due to flush after max-1 reqs\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200613 case TEST_STOP_DUE_TO_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200614 return "\"stop due to read\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200615 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200616 return "\"stop due to read after max-1 reqs\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200617 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
Lee Susman039ce092012-11-15 13:36:15 +0200618 return "\"stop due to empty queue\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200619 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
Lee Susman039ce092012-11-15 13:36:15 +0200620 return "\"stop due to max req num\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200621 case TEST_STOP_DUE_TO_THRESHOLD:
Lee Susman039ce092012-11-15 13:36:15 +0200622 return "\"stop due to exceeding threshold\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200623 case TEST_RET_ABORT:
Lee Susman039ce092012-11-15 13:36:15 +0200624 return "\"err_check return abort\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200625 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
Lee Susman039ce092012-11-15 13:36:15 +0200626 return "\"err_check return partial followed by success\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200627 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
Lee Susman039ce092012-11-15 13:36:15 +0200628 return "\"err_check return partial followed by abort\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200629 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
Lee Susman039ce092012-11-15 13:36:15 +0200630 return "\"err_check return partial multiple until success\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200631 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
Lee Susman039ce092012-11-15 13:36:15 +0200632 return "\"err_check return partial max fail index\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200633 case TEST_RET_RETRY:
Lee Susman039ce092012-11-15 13:36:15 +0200634 return "\"err_check return retry\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200635 case TEST_RET_CMD_ERR:
Lee Susman039ce092012-11-15 13:36:15 +0200636 return "\"err_check return cmd error\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200637 case TEST_RET_DATA_ERR:
Lee Susman039ce092012-11-15 13:36:15 +0200638 return "\"err_check return data error\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200639 case TEST_HDR_INVALID_VERSION:
Lee Susman039ce092012-11-15 13:36:15 +0200640 return "\"invalid - wrong header version\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200641 case TEST_HDR_WRONG_WRITE_CODE:
Lee Susman039ce092012-11-15 13:36:15 +0200642 return "\"invalid - wrong write code\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200643 case TEST_HDR_INVALID_RW_CODE:
Lee Susman039ce092012-11-15 13:36:15 +0200644 return "\"invalid - wrong R/W code\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200645 case TEST_HDR_DIFFERENT_ADDRESSES:
Lee Susman039ce092012-11-15 13:36:15 +0200646 return "\"invalid - header different addresses\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200647 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
Lee Susman039ce092012-11-15 13:36:15 +0200648 return "\"invalid - header req num smaller than actual\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200649 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
Lee Susman039ce092012-11-15 13:36:15 +0200650 return "\"invalid - header req num larger than actual\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200651 case TEST_HDR_CMD23_PACKED_BIT_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200652 return "\"invalid - header cmd23 packed bit set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200653 case TEST_CMD23_MAX_PACKED_WRITES:
Lee Susman039ce092012-11-15 13:36:15 +0200654 return "\"invalid - cmd23 max packed writes\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200655 case TEST_CMD23_ZERO_PACKED_WRITES:
Lee Susman039ce092012-11-15 13:36:15 +0200656 return "\"invalid - cmd23 zero packed writes\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200657 case TEST_CMD23_PACKED_BIT_UNSET:
Lee Susman039ce092012-11-15 13:36:15 +0200658 return "\"invalid - cmd23 packed bit unset\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200659 case TEST_CMD23_REL_WR_BIT_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200660 return "\"invalid - cmd23 rel wr bit set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200661 case TEST_CMD23_BITS_16TO29_SET:
Lee Susman039ce092012-11-15 13:36:15 +0200662 return "\"invalid - cmd23 bits [16-29] set\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200663 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
Lee Susman039ce092012-11-15 13:36:15 +0200664 return "\"invalid - cmd23 header block not in count\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200665 case TEST_PACKING_EXP_N_OVER_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200666 return "\"packing control - pack n\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200667 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200668 return "\"packing control - pack n followed by read\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200669 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
Lee Susman039ce092012-11-15 13:36:15 +0200670 return "\"packing control - pack n followed by flush\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200671 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200672 return "\"packing control - pack one followed by read\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200673 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200674 return "\"packing control - pack threshold\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200675 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
Lee Susman039ce092012-11-15 13:36:15 +0200676 return "\"packing control - no packing\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200677 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
Lee Susman039ce092012-11-15 13:36:15 +0200678 return "\"packing control - no packing, trigger requests\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200679 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200680 return "\"packing control - no pack, trigger-read-trigger\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200681 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
Lee Susman039ce092012-11-15 13:36:15 +0200682 return "\"packing control- no pack, trigger-flush-trigger\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200683 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
Lee Susman039ce092012-11-15 13:36:15 +0200684 return "\"packing control - mix: pack -> no pack -> pack\"";
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200685 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
Lee Susman039ce092012-11-15 13:36:15 +0200686 return "\"packing control - mix: no pack->pack->no pack\"";
Maya Erezddc55732012-10-17 09:51:01 +0200687 case TEST_WRITE_DISCARD_SANITIZE_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200688 return "\"write, discard, sanitize\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200689 case BKOPS_DELAYED_WORK_LEVEL_1:
Lee Susman039ce092012-11-15 13:36:15 +0200690 return "\"delayed work BKOPS level 1\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200691 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
Lee Susman039ce092012-11-15 13:36:15 +0200692 return "\"delayed work BKOPS level 1 with HPI\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200693 case BKOPS_CANCEL_DELAYED_WORK:
Lee Susman039ce092012-11-15 13:36:15 +0200694 return "\"cancel delayed BKOPS work\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200695 case BKOPS_URGENT_LEVEL_2:
Lee Susman039ce092012-11-15 13:36:15 +0200696 return "\"urgent BKOPS level 2\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200697 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
Lee Susman039ce092012-11-15 13:36:15 +0200698 return "\"urgent BKOPS level 2, followed by a request\"";
Yaniv Gardie9214c82012-10-18 13:58:18 +0200699 case BKOPS_URGENT_LEVEL_3:
Lee Susman039ce092012-11-15 13:36:15 +0200700 return "\"urgent BKOPS level 3\"";
Lee Susmanf18263a2012-10-24 14:14:37 +0200701 case TEST_LONG_SEQUENTIAL_READ:
Lee Susman039ce092012-11-15 13:36:15 +0200702 return "\"long sequential read\"";
Lee Susmana35ae6e2012-10-25 16:06:07 +0200703 case TEST_LONG_SEQUENTIAL_WRITE:
Lee Susman039ce092012-11-15 13:36:15 +0200704 return "\"long sequential write\"";
Lee Susmanb09c0412012-12-19 14:28:52 +0200705 case TEST_NEW_REQ_NOTIFICATION:
706 return "\"new request notification test\"";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200707 default:
Lee Susman039ce092012-11-15 13:36:15 +0200708 return " Unknown testcase";
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200709 }
710
711 return NULL;
712}
713
714/*
715 * Compare individual testcase's statistics to the expected statistics:
716 * Compare stop reason and number of packing events
717 */
718static int check_wr_packing_statistics(struct test_data *td)
719{
720 struct mmc_wr_pack_stats *mmc_packed_stats;
721 struct mmc_queue *mq = td->req_q->queuedata;
722 int max_packed_reqs = mq->card->ext_csd.max_packed_writes;
723 int i;
724 struct mmc_card *card = mq->card;
725 struct mmc_wr_pack_stats expected_stats;
726 int *stop_reason;
727 int ret = 0;
728
729 if (!mq) {
730 test_pr_err("%s: NULL mq", __func__);
731 return -EINVAL;
732 }
733
734 expected_stats = mbtd->exp_packed_stats;
735
736 mmc_packed_stats = mmc_blk_get_packed_statistics(card);
737 if (!mmc_packed_stats) {
738 test_pr_err("%s: NULL mmc_packed_stats", __func__);
739 return -EINVAL;
740 }
741
742 if (!mmc_packed_stats->packing_events) {
743 test_pr_err("%s: NULL packing_events", __func__);
744 return -EINVAL;
745 }
746
747 spin_lock(&mmc_packed_stats->lock);
748
749 if (!mmc_packed_stats->enabled) {
750 test_pr_err("%s write packing statistics are not enabled",
751 __func__);
752 ret = -EINVAL;
753 goto exit_err;
754 }
755
756 stop_reason = mmc_packed_stats->pack_stop_reason;
757
Tatyana Brokhman91e1e322012-10-09 13:53:43 +0200758 for (i = 1; i <= max_packed_reqs; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200759 if (mmc_packed_stats->packing_events[i] !=
760 expected_stats.packing_events[i]) {
761 test_pr_err(
762 "%s: Wrong pack stats in index %d, got %d, expected %d",
763 __func__, i, mmc_packed_stats->packing_events[i],
764 expected_stats.packing_events[i]);
765 if (td->fs_wr_reqs_during_test)
766 goto cancel_round;
767 ret = -EINVAL;
768 goto exit_err;
769 }
770 }
771
772 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
773 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
774 test_pr_err(
775 "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
776 __func__, stop_reason[EXCEEDS_SEGMENTS],
777 expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
778 if (td->fs_wr_reqs_during_test)
779 goto cancel_round;
780 ret = -EINVAL;
781 goto exit_err;
782 }
783
784 if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
785 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
786 test_pr_err(
787 "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
788 __func__, stop_reason[EXCEEDS_SECTORS],
789 expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
790 if (td->fs_wr_reqs_during_test)
791 goto cancel_round;
792 ret = -EINVAL;
793 goto exit_err;
794 }
795
796 if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
797 expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
798 test_pr_err(
799 "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
800 __func__, stop_reason[WRONG_DATA_DIR],
801 expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
802 if (td->fs_wr_reqs_during_test)
803 goto cancel_round;
804 ret = -EINVAL;
805 goto exit_err;
806 }
807
808 if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
809 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
810 test_pr_err(
811 "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
812 __func__, stop_reason[FLUSH_OR_DISCARD],
813 expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
814 if (td->fs_wr_reqs_during_test)
815 goto cancel_round;
816 ret = -EINVAL;
817 goto exit_err;
818 }
819
820 if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
821 expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
822 test_pr_err(
823 "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
824 __func__, stop_reason[EMPTY_QUEUE],
825 expected_stats.pack_stop_reason[EMPTY_QUEUE]);
826 if (td->fs_wr_reqs_during_test)
827 goto cancel_round;
828 ret = -EINVAL;
829 goto exit_err;
830 }
831
832 if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
833 expected_stats.pack_stop_reason[REL_WRITE]) {
834 test_pr_err(
835 "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
836 __func__, stop_reason[REL_WRITE],
837 expected_stats.pack_stop_reason[REL_WRITE]);
838 if (td->fs_wr_reqs_during_test)
839 goto cancel_round;
840 ret = -EINVAL;
841 goto exit_err;
842 }
843
844exit_err:
845 spin_unlock(&mmc_packed_stats->lock);
846 if (ret && mmc_packed_stats->enabled)
847 print_mmc_packing_stats(card);
848 return ret;
849cancel_round:
850 spin_unlock(&mmc_packed_stats->lock);
851 test_iosched_set_ignore_round(true);
852 return 0;
853}
854
855/*
856 * Pseudo-randomly choose a seed based on the last seed, and update it in
857 * seed_number. then return seed_number (mod max_val), or min_val.
858 */
859static unsigned int pseudo_random_seed(unsigned int *seed_number,
860 unsigned int min_val,
861 unsigned int max_val)
862{
863 int ret = 0;
864
865 if (!seed_number)
866 return 0;
867
868 *seed_number = ((unsigned int)(((unsigned long)*seed_number *
869 (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
870 ret = (unsigned int)((*seed_number) % max_val);
871
872 return (ret > min_val ? ret : min_val);
873}
874
875/*
876 * Given a pseudo-random seed, find a pseudo-random num_of_bios.
877 * Make sure that num_of_bios is not larger than TEST_MAX_SECTOR_RANGE
878 */
879static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
880 unsigned int *num_of_bios)
881{
882 do {
883 *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
884 TEST_MAX_BIOS_PER_REQ);
885 if (!(*num_of_bios))
886 *num_of_bios = 1;
887 } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
888}
889
890/* Add a single read request to the given td's request queue */
891static int prepare_request_add_read(struct test_data *td)
892{
893 int ret;
894 int start_sec;
895
896 if (td)
897 start_sec = td->start_sector;
898 else {
899 test_pr_err("%s: NULL td", __func__);
900 return 0;
901 }
902
903 test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
904 td->wr_rd_next_req_id);
905
906 ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
907 TEST_PATTERN_5A, NULL);
908 if (ret) {
909 test_pr_err("%s: failed to add a read request", __func__);
910 return ret;
911 }
912
913 return 0;
914}
915
916/* Add a single flush request to the given td's request queue */
917static int prepare_request_add_flush(struct test_data *td)
918{
919 int ret;
920
921 if (!td) {
922 test_pr_err("%s: NULL td", __func__);
923 return 0;
924 }
925
926 test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
927 td->unique_next_req_id);
928 ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
929 0, 0, NULL);
930 if (ret) {
931 test_pr_err("%s: failed to add a flush request", __func__);
932 return ret;
933 }
934
935 return ret;
936}
937
938/*
939 * Add num_requets amount of write requests to the given td's request queue.
940 * If random test mode is chosen we pseudo-randomly choose the number of bios
941 * for each write request, otherwise add between 1 to 5 bio per request.
942 */
943static int prepare_request_add_write_reqs(struct test_data *td,
944 int num_requests, int is_err_expected,
945 int is_random)
946{
947 int i;
948 unsigned int start_sec;
949 int num_bios;
950 int ret = 0;
951 unsigned int *bio_seed = &mbtd->random_test_seed;
952
953 if (td)
954 start_sec = td->start_sector;
955 else {
956 test_pr_err("%s: NULL td", __func__);
957 return ret;
958 }
959
960 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
961 num_requests, td->wr_rd_next_req_id);
962
Lee Susmanf18263a2012-10-24 14:14:37 +0200963 for (i = 1 ; i <= num_requests ; i++) {
964 start_sec =
965 td->start_sector + sizeof(int) *
966 BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +0200967 if (is_random)
968 pseudo_rnd_num_of_bios(bio_seed, &num_bios);
969 else
970 /*
971 * For the non-random case, give num_bios a value
972 * between 1 and 5, to keep a small number of BIOs
973 */
974 num_bios = (i%5)+1;
975
976 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
977 start_sec, num_bios, TEST_PATTERN_5A, NULL);
978
979 if (ret) {
980 test_pr_err("%s: failed to add a write request",
981 __func__);
982 return ret;
983 }
984 }
985 return 0;
986}
987
988/*
989 * Prepare the write, read and flush requests for a generic packed commands
990 * testcase
991 */
992static int prepare_packed_requests(struct test_data *td, int is_err_expected,
993 int num_requests, int is_random)
994{
995 int ret = 0;
996 struct mmc_queue *mq;
997 int max_packed_reqs;
998 struct request_queue *req_q;
999
1000 if (!td) {
1001 pr_err("%s: NULL td", __func__);
1002 return -EINVAL;
1003 }
1004
1005 req_q = td->req_q;
1006
1007 if (!req_q) {
1008 pr_err("%s: NULL request queue", __func__);
1009 return -EINVAL;
1010 }
1011
1012 mq = req_q->queuedata;
1013 if (!mq) {
1014 test_pr_err("%s: NULL mq", __func__);
1015 return -EINVAL;
1016 }
1017
1018 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1019
1020 if (mbtd->random_test_seed <= 0) {
1021 mbtd->random_test_seed =
1022 (unsigned int)(get_jiffies_64() & 0xFFFF);
1023 test_pr_info("%s: got seed from jiffies %d",
1024 __func__, mbtd->random_test_seed);
1025 }
1026
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001027 ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
1028 is_random);
1029 if (ret)
1030 return ret;
1031
1032 /* Avoid memory corruption in upcoming stats set */
1033 if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
1034 num_requests--;
1035
1036 memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
1037 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1038 memset(mbtd->exp_packed_stats.packing_events, 0,
1039 (max_packed_reqs + 1) * sizeof(u32));
1040 if (num_requests <= max_packed_reqs)
1041 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1042
1043 switch (td->test_info.testcase) {
1044 case TEST_STOP_DUE_TO_FLUSH:
1045 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1046 ret = prepare_request_add_flush(td);
1047 if (ret)
1048 return ret;
1049
1050 mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
1051 break;
1052 case TEST_STOP_DUE_TO_READ:
1053 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1054 ret = prepare_request_add_read(td);
1055 if (ret)
1056 return ret;
1057
1058 mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
1059 break;
1060 case TEST_STOP_DUE_TO_THRESHOLD:
1061 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1062 mbtd->exp_packed_stats.packing_events[1] = 1;
1063 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
1064 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1065 break;
1066 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1067 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1068 mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
1069 break;
1070 default:
1071 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1072 }
1073 mbtd->num_requests = num_requests;
1074
1075 return 0;
1076}
1077
1078/*
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001079 * Prepare the write, read and flush requests for the packing control
1080 * testcases
1081 */
static int prepare_packed_control_tests_requests(struct test_data *td,
			int is_err_expected, int num_requests, int is_random)
{
	int ret = 0;
	struct mmc_queue *mq;
	int max_packed_reqs;
	int temp_num_req = num_requests;
	struct request_queue *req_q;
	int test_packed_trigger;
	int num_packed_reqs;

	if (!td) {
		test_pr_err("%s: NULL td\n", __func__);
		return -EINVAL;
	}

	req_q = td->req_q;

	if (!req_q) {
		test_pr_err("%s: NULL request queue\n", __func__);
		return -EINVAL;
	}

	mq = req_q->queuedata;
	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}

	/* trigger = number of consecutive write reqs that arms packing */
	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
	/* only requests beyond the trigger are expected to be packed */
	num_packed_reqs = num_requests - test_packed_trigger;

	if (mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			     __func__, mbtd->random_test_seed);
	}

	if (td->test_info.testcase ==
			TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
		/* keep the full count for later; issue trigger-1 reqs first */
		temp_num_req = num_requests;
		num_requests = test_packed_trigger - 1;
	}

	/* Verify that the packing is disabled before starting the test */
	mq->wr_packing_enabled = false;
	mq->num_of_potential_packed_wr_reqs = 0;

	if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
		/* this testcase starts with packing already armed */
		mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
		mq->wr_packing_enabled = true;
		num_requests = test_packed_trigger + 2;
	}

	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
					     is_random);
	if (ret)
		goto exit;

	if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
		num_requests = temp_num_req;

	/* reset the expected statistics before filling them in below */
	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
		sizeof(mbtd->exp_packed_stats.pack_stop_reason));
	memset(mbtd->exp_packed_stats.packing_events, 0,
		(max_packed_reqs + 1) * sizeof(u32));

	/* per-testcase extra requests and the stats they should produce */
	switch (td->test_info.testcase) {
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
		/* a read after the writes stops the pack: WRONG_DATA_DIR */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
		/* flush splits the writes into two packed batches */
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_packed_reqs,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
		/* read resets the trigger count; no packing expected */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
		/* flush resets the trigger count; no packing expected */
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
		/* pack -> read stops it -> too few writes -> pack again */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
		mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		break;
	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
		/* no pack -> enough writes to pack -> read -> no pack */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, num_requests,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;

		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
				is_err_expected, is_random);
		if (ret)
			goto exit;

		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
		/* no extra requests and no packing events expected */
		break;
	default:
		BUG_ON(num_packed_reqs < 0);
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
	}
	mbtd->num_requests = num_requests;

exit:
	return ret;
}
1254
1255/*
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001256 * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
1257 * In this testcase we have mixed error expectations from different
1258 * write requests, hence the special prepare function.
1259 */
1260static int prepare_partial_followed_by_abort(struct test_data *td,
1261 int num_requests)
1262{
1263 int i, start_address;
1264 int is_err_expected = 0;
1265 int ret = 0;
1266 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1267 int max_packed_reqs;
1268
1269 if (!mq) {
1270 test_pr_err("%s: NULL mq", __func__);
1271 return -EINVAL;
1272 }
1273
1274 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
1275
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001276 for (i = 1; i <= num_requests; i++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001277 if (i > (num_requests / 2))
1278 is_err_expected = 1;
1279
Lee Susmanf18263a2012-10-24 14:14:37 +02001280 start_address = td->start_sector +
1281 sizeof(int) * BIO_U32_SIZE * td->num_of_write_bios;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001282 ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001283 start_address, (i % 5) + 1, TEST_PATTERN_5A,
1284 NULL);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001285 if (ret) {
1286 test_pr_err("%s: failed to add a write request",
1287 __func__);
1288 return ret;
1289 }
1290 }
1291
1292 memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
1293 sizeof(mbtd->exp_packed_stats.pack_stop_reason));
1294 memset(mbtd->exp_packed_stats.packing_events, 0,
1295 (max_packed_reqs + 1) * sizeof(u32));
1296 mbtd->exp_packed_stats.packing_events[num_requests] = 1;
1297 mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
1298
1299 mbtd->num_requests = num_requests;
1300
1301 return ret;
1302}
1303
1304/*
1305 * Get number of write requests for current testcase. If random test mode was
1306 * chosen, pseudo-randomly choose the number of requests, otherwise set to
1307 * two less than the packing threshold.
1308 */
1309static int get_num_requests(struct test_data *td)
1310{
1311 int *seed = &mbtd->random_test_seed;
1312 struct request_queue *req_q;
1313 struct mmc_queue *mq;
1314 int max_num_requests;
1315 int num_requests;
1316 int min_num_requests = 2;
1317 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001318 int max_for_double;
1319 int test_packed_trigger;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001320
1321 req_q = test_iosched_get_req_queue();
1322 if (req_q)
1323 mq = req_q->queuedata;
1324 else {
1325 test_pr_err("%s: NULL request queue", __func__);
1326 return 0;
1327 }
1328
1329 if (!mq) {
1330 test_pr_err("%s: NULL mq", __func__);
1331 return -EINVAL;
1332 }
1333
1334 max_num_requests = mq->card->ext_csd.max_packed_writes;
1335 num_requests = max_num_requests - 2;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001336 test_packed_trigger = mq->num_wr_reqs_to_start_packing;
1337
1338 /*
1339 * Here max_for_double is intended for packed control testcases
1340 * in which we issue many write requests. It's purpose is to prevent
1341 * exceeding max number of req_queue requests.
1342 */
1343 max_for_double = max_num_requests - 10;
1344
1345 if (td->test_info.testcase ==
1346 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1347 /* Don't expect packing, so issue up to trigger-1 reqs */
1348 num_requests = test_packed_trigger - 1;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001349
1350 if (is_random) {
1351 if (td->test_info.testcase ==
1352 TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001353 /*
1354 * Here we don't want num_requests to be less than 1
1355 * as a consequence of division by 2.
1356 */
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001357 min_num_requests = 3;
1358
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001359 if (td->test_info.testcase ==
1360 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1361 /* Don't expect packing, so issue up to trigger reqs */
1362 max_num_requests = test_packed_trigger;
1363
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001364 num_requests = pseudo_random_seed(seed, min_num_requests,
1365 max_num_requests - 1);
1366 }
1367
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001368 if (td->test_info.testcase ==
1369 TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
1370 num_requests -= test_packed_trigger;
1371
1372 if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
1373 num_requests =
1374 num_requests > max_for_double ? max_for_double : num_requests;
1375
1376 if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
1377 num_requests += test_packed_trigger;
1378
1379 if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
1380 num_requests = test_packed_trigger;
1381
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001382 return num_requests;
1383}
1384
Lee Susmanf18263a2012-10-24 14:14:37 +02001385static int prepare_long_test_requests(struct test_data *td)
1386{
1387
1388 int ret;
1389 int start_sec;
1390 int j;
1391 int test_direction;
1392
1393 if (td)
1394 start_sec = td->start_sector;
1395 else {
1396 test_pr_err("%s: NULL td\n", __func__);
1397 return -EINVAL;
1398 }
1399
Lee Susmana35ae6e2012-10-25 16:06:07 +02001400 if (td->test_info.testcase == TEST_LONG_SEQUENTIAL_WRITE)
1401 test_direction = WRITE;
1402 else
1403 test_direction = READ;
Lee Susmanf18263a2012-10-24 14:14:37 +02001404
Lee Susmana35ae6e2012-10-25 16:06:07 +02001405 test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
Lee Susmanf18263a2012-10-24 14:14:37 +02001406 LONG_TEST_ACTUAL_NUM_REQS, td->wr_rd_next_req_id);
1407
1408 for (j = 0; j < LONG_TEST_ACTUAL_NUM_REQS; j++) {
1409
1410 ret = test_iosched_add_wr_rd_test_req(0, test_direction,
1411 start_sec,
1412 TEST_MAX_BIOS_PER_REQ,
1413 TEST_NO_PATTERN, NULL);
1414 if (ret) {
1415 test_pr_err("%s: failed to add a bio request",
1416 __func__);
1417 return ret;
1418 }
1419
1420 start_sec +=
1421 (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE);
1422 }
1423
1424 return 0;
1425}
1426
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001427/*
1428 * An implementation for the prepare_test_fn pointer in the test_info
1429 * data structure. According to the testcase we add the right number of requests
1430 * and decide if an error is expected or not.
1431 */
1432static int prepare_test(struct test_data *td)
1433{
1434 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
1435 int max_num_requests;
1436 int num_requests = 0;
1437 int ret = 0;
1438 int is_random = mbtd->is_random;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001439 int test_packed_trigger = mq->num_wr_reqs_to_start_packing;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001440
1441 if (!mq) {
1442 test_pr_err("%s: NULL mq", __func__);
1443 return -EINVAL;
1444 }
1445
1446 max_num_requests = mq->card->ext_csd.max_packed_writes;
1447
1448 if (is_random && mbtd->random_test_seed == 0) {
1449 mbtd->random_test_seed =
1450 (unsigned int)(get_jiffies_64() & 0xFFFF);
1451 test_pr_info("%s: got seed from jiffies %d",
1452 __func__, mbtd->random_test_seed);
1453 }
1454
1455 num_requests = get_num_requests(td);
1456
1457 if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
1458 mq->packed_test_fn =
1459 test_invalid_packed_cmd;
1460
1461 if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
1462 mq->err_check_fn = test_err_check;
1463
1464 switch (td->test_info.testcase) {
1465 case TEST_STOP_DUE_TO_FLUSH:
1466 case TEST_STOP_DUE_TO_READ:
1467 case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
1468 case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
1469 case TEST_STOP_DUE_TO_EMPTY_QUEUE:
1470 case TEST_CMD23_PACKED_BIT_UNSET:
1471 ret = prepare_packed_requests(td, 0, num_requests, is_random);
1472 break;
1473 case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
1474 case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
1475 ret = prepare_packed_requests(td, 0, max_num_requests - 1,
1476 is_random);
1477 break;
1478 case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
1479 ret = prepare_partial_followed_by_abort(td, num_requests);
1480 break;
1481 case TEST_STOP_DUE_TO_MAX_REQ_NUM:
1482 case TEST_RET_PARTIAL_MAX_FAIL_IDX:
1483 ret = prepare_packed_requests(td, 0, max_num_requests,
1484 is_random);
1485 break;
1486 case TEST_STOP_DUE_TO_THRESHOLD:
1487 ret = prepare_packed_requests(td, 0, max_num_requests + 1,
1488 is_random);
1489 break;
1490 case TEST_RET_ABORT:
1491 case TEST_RET_RETRY:
1492 case TEST_RET_CMD_ERR:
1493 case TEST_RET_DATA_ERR:
1494 case TEST_HDR_INVALID_VERSION:
1495 case TEST_HDR_WRONG_WRITE_CODE:
1496 case TEST_HDR_INVALID_RW_CODE:
1497 case TEST_HDR_DIFFERENT_ADDRESSES:
1498 case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
1499 case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
1500 case TEST_CMD23_MAX_PACKED_WRITES:
1501 case TEST_CMD23_ZERO_PACKED_WRITES:
1502 case TEST_CMD23_REL_WR_BIT_SET:
1503 case TEST_CMD23_BITS_16TO29_SET:
1504 case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
1505 case TEST_HDR_CMD23_PACKED_BIT_SET:
1506 ret = prepare_packed_requests(td, 1, num_requests, is_random);
1507 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001508 case TEST_PACKING_EXP_N_OVER_TRIGGER:
1509 case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
1510 case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
1511 case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
1512 case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
1513 case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
1514 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1515 is_random);
1516 break;
1517 case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
1518 ret = prepare_packed_control_tests_requests(td, 0,
1519 max_num_requests, is_random);
1520 break;
1521 case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
1522 ret = prepare_packed_control_tests_requests(td, 0,
1523 test_packed_trigger + 1,
1524 is_random);
1525 break;
1526 case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
1527 ret = prepare_packed_control_tests_requests(td, 0, num_requests,
1528 is_random);
1529 break;
1530 case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
1531 case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
1532 ret = prepare_packed_control_tests_requests(td, 0,
1533 test_packed_trigger, is_random);
1534 break;
Lee Susmana35ae6e2012-10-25 16:06:07 +02001535 case TEST_LONG_SEQUENTIAL_WRITE:
1536 ret = prepare_long_test_requests(td);
1537 break;
Lee Susmanf18263a2012-10-24 14:14:37 +02001538 case TEST_LONG_SEQUENTIAL_READ:
1539 ret = prepare_long_test_requests(td);
1540 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001541 default:
1542 test_pr_info("%s: Invalid test case...", __func__);
Lee Susmanf18263a2012-10-24 14:14:37 +02001543 ret = -EINVAL;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001544 }
1545
1546 return ret;
1547}
1548
Maya Ereza12d1d22013-01-10 23:35:42 +02001549static int run_packed_test(struct test_data *td)
1550{
1551 struct mmc_queue *mq;
1552 struct request_queue *req_q;
1553
1554 if (!td) {
1555 pr_err("%s: NULL td", __func__);
1556 return -EINVAL;
1557 }
1558
1559 req_q = td->req_q;
1560
1561 if (!req_q) {
1562 pr_err("%s: NULL request queue", __func__);
1563 return -EINVAL;
1564 }
1565
1566 mq = req_q->queuedata;
1567 if (!mq) {
1568 test_pr_err("%s: NULL mq", __func__);
1569 return -EINVAL;
1570 }
1571 mmc_blk_init_packed_statistics(mq->card);
1572
1573 if (td->test_info.testcase != TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
1574 /*
1575 * Verify that the packing is disabled before starting the
1576 * test
1577 */
1578 mq->wr_packing_enabled = false;
1579 mq->num_of_potential_packed_wr_reqs = 0;
1580 }
1581
1582 __blk_run_queue(td->req_q);
1583
1584 return 0;
1585}
1586
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001587/*
1588 * An implementation for the post_test_fn in the test_info data structure.
1589 * In our case we just reset the function pointers in the mmc_queue in order for
1590 * the FS to be able to dispatch it's requests correctly after the test is
1591 * finished.
1592 */
1593static int post_test(struct test_data *td)
1594{
1595 struct mmc_queue *mq;
1596
1597 if (!td)
1598 return -EINVAL;
1599
1600 mq = td->req_q->queuedata;
1601
1602 if (!mq) {
1603 test_pr_err("%s: NULL mq", __func__);
1604 return -EINVAL;
1605 }
1606
1607 mq->packed_test_fn = NULL;
1608 mq->err_check_fn = NULL;
1609
1610 return 0;
1611}
1612
1613/*
1614 * This function checks, based on the current test's test_group, that the
1615 * packed commands capability and control are set right. In addition, we check
1616 * if the card supports the packed command feature.
1617 */
1618static int validate_packed_commands_settings(void)
1619{
1620 struct request_queue *req_q;
1621 struct mmc_queue *mq;
1622 int max_num_requests;
1623 struct mmc_host *host;
1624
1625 req_q = test_iosched_get_req_queue();
1626 if (!req_q) {
1627 test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
1628 test_iosched_set_test_result(TEST_FAILED);
1629 return -EINVAL;
1630 }
1631
1632 mq = req_q->queuedata;
1633 if (!mq) {
1634 test_pr_err("%s: NULL mq", __func__);
1635 return -EINVAL;
1636 }
1637
1638 max_num_requests = mq->card->ext_csd.max_packed_writes;
1639 host = mq->card->host;
1640
1641 if (!(host->caps2 && MMC_CAP2_PACKED_WR)) {
1642 test_pr_err("%s: Packed Write capability disabled, exit test",
1643 __func__);
1644 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1645 return -EINVAL;
1646 }
1647
1648 if (max_num_requests == 0) {
1649 test_pr_err(
1650 "%s: no write packing support, ext_csd.max_packed_writes=%d",
1651 __func__, mq->card->ext_csd.max_packed_writes);
1652 test_iosched_set_test_result(TEST_NOT_SUPPORTED);
1653 return -EINVAL;
1654 }
1655
1656 test_pr_info("%s: max number of packed requests supported is %d ",
1657 __func__, max_num_requests);
1658
1659 switch (mbtd->test_group) {
1660 case TEST_SEND_WRITE_PACKING_GROUP:
1661 case TEST_ERR_CHECK_GROUP:
1662 case TEST_SEND_INVALID_GROUP:
1663 /* disable the packing control */
1664 host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
1665 break;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02001666 case TEST_PACKING_CONTROL_GROUP:
1667 host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
1668 break;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02001669 default:
1670 break;
1671 }
1672
1673 return 0;
1674}
1675
Maya Erezddc55732012-10-17 09:51:01 +02001676static void pseudo_rnd_sector_and_size(unsigned int *seed,
1677 unsigned int min_start_sector,
1678 unsigned int *start_sector,
1679 unsigned int *num_of_bios)
1680{
1681 unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
1682 do {
1683 *start_sector = pseudo_random_seed(seed,
1684 1, max_sec);
1685 *num_of_bios = pseudo_random_seed(seed,
1686 1, TEST_MAX_BIOS_PER_REQ);
1687 if (!(*num_of_bios))
1688 *num_of_bios = 1;
1689 } while ((*start_sector < min_start_sector) ||
1690 (*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
1691}
1692
/* sanitize test functions */
/*
 * prepare_write_discard_sanitize_read() - Queue a long series of discard
 * requests over pseudo-random sector ranges, then append one sanitize
 * request.
 *
 * Issues (BLKDEV_MAX_RQ - 10) discard requests, leaving spare slots in
 * the request queue, followed by a single REQ_UNIQUE_SANITIZE request.
 * Returns 0.
 */
static int prepare_write_discard_sanitize_read(struct test_data *td)
{
	unsigned int start_sector;
	unsigned int num_of_bios = 0;
	/*
	 * NOTE(review): static and never reset, so the logged total
	 * accumulates across repeated test invocations -- confirm this is
	 * intentional.
	 */
	static unsigned int total_bios;
	unsigned int *num_bios_seed;
	int i = 0;

	/* no seed supplied by the user: derive one from jiffies */
	if (mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			     __func__, mbtd->random_test_seed);
	}
	num_bios_seed = &mbtd->random_test_seed;

	do {
		pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
					   &start_sector, &num_of_bios);

		/* DISCARD */
		total_bios += num_of_bios;
		test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
			     __func__, td->unique_next_req_id, start_sector,
			     num_of_bios);
		test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
				start_sector, BIO_TO_SECTOR(num_of_bios),
				NULL);

	} while (++i < (BLKDEV_MAX_RQ-10));

	test_pr_info("%s: total discard bios = %d", __func__, total_bios);

	test_pr_info("%s: add sanitize req", __func__);
	test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);

	return 0;
}
1732
Yaniv Gardie9214c82012-10-18 13:58:18 +02001733/*
1734 * Post test operations for BKOPs test
1735 * Disable the BKOPs statistics and clear the feature flags
1736 */
1737static int bkops_post_test(struct test_data *td)
1738{
1739 struct request_queue *q = td->req_q;
1740 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1741 struct mmc_card *card = mq->card;
1742
1743 mmc_card_clr_doing_bkops(mq->card);
1744 card->ext_csd.raw_bkops_status = 0;
1745
1746 spin_lock(&card->bkops_info.bkops_stats.lock);
1747 card->bkops_info.bkops_stats.enabled = false;
1748 spin_unlock(&card->bkops_info.bkops_stats.lock);
1749
1750 return 0;
1751}
1752
1753/*
1754 * Verify the BKOPs statsistics
1755 */
1756static int check_bkops_result(struct test_data *td)
1757{
1758 struct request_queue *q = td->req_q;
1759 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1760 struct mmc_card *card = mq->card;
1761 struct mmc_bkops_stats *bkops_stat;
1762
1763 if (!card)
1764 goto fail;
1765
1766 bkops_stat = &card->bkops_info.bkops_stats;
1767
1768 test_pr_info("%s: Test results: bkops:(%d,%d,%d) hpi:%d, suspend:%d",
1769 __func__,
1770 bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX],
1771 bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX],
1772 bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX],
1773 bkops_stat->hpi,
1774 bkops_stat->suspend);
1775
1776 switch (mbtd->test_info.testcase) {
1777 case BKOPS_DELAYED_WORK_LEVEL_1:
1778 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
1779 (bkops_stat->suspend == 1) &&
1780 (bkops_stat->hpi == 0))
1781 goto exit;
1782 else
1783 goto fail;
1784 break;
1785 case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
1786 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
1787 (bkops_stat->suspend == 0) &&
1788 (bkops_stat->hpi == 1))
1789 goto exit;
Yaniv Gardidced8e42012-11-25 16:00:40 +02001790 /* this might happen due to timing issues */
1791 else if
1792 ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
1793 (bkops_stat->suspend == 0) &&
1794 (bkops_stat->hpi == 0))
1795 goto ignore;
Yaniv Gardie9214c82012-10-18 13:58:18 +02001796 else
1797 goto fail;
1798 break;
1799 case BKOPS_CANCEL_DELAYED_WORK:
1800 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
1801 (bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 0) &&
1802 (bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 0) &&
1803 (bkops_stat->suspend == 0) &&
1804 (bkops_stat->hpi == 0))
1805 goto exit;
1806 else
1807 goto fail;
1808 case BKOPS_URGENT_LEVEL_2:
1809 case BKOPS_URGENT_LEVEL_2_TWO_REQS:
1810 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 1) &&
1811 (bkops_stat->suspend == 0) &&
1812 (bkops_stat->hpi == 0))
1813 goto exit;
1814 else
1815 goto fail;
1816 case BKOPS_URGENT_LEVEL_3:
1817 if ((bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 1) &&
1818 (bkops_stat->suspend == 0) &&
1819 (bkops_stat->hpi == 0))
1820 goto exit;
1821 else
1822 goto fail;
1823 default:
1824 return -EINVAL;
1825 }
1826
1827exit:
1828 return 0;
Yaniv Gardidced8e42012-11-25 16:00:40 +02001829ignore:
1830 test_iosched_set_ignore_round(true);
1831 return 0;
Yaniv Gardie9214c82012-10-18 13:58:18 +02001832fail:
1833 if (td->fs_wr_reqs_during_test) {
1834 test_pr_info("%s: wr reqs during test, cancel the round",
1835 __func__);
1836 test_iosched_set_ignore_round(true);
1837 return 0;
1838 }
1839
1840 test_pr_info("%s: BKOPs statistics are not as expected, test failed",
1841 __func__);
1842 return -EINVAL;
1843}
1844
1845static void bkops_end_io_final_fn(struct request *rq, int err)
1846{
1847 struct test_request *test_rq =
1848 (struct test_request *)rq->elv.priv[0];
1849 BUG_ON(!test_rq);
1850
1851 test_rq->req_completed = 1;
1852 test_rq->req_result = err;
1853
1854 test_pr_info("%s: request %d completed, err=%d",
1855 __func__, test_rq->req_id, err);
1856
1857 mbtd->bkops_stage = BKOPS_STAGE_4;
1858 wake_up(&mbtd->bkops_wait_q);
1859}
1860
1861static void bkops_end_io_fn(struct request *rq, int err)
1862{
1863 struct test_request *test_rq =
1864 (struct test_request *)rq->elv.priv[0];
1865 BUG_ON(!test_rq);
1866
1867 test_rq->req_completed = 1;
1868 test_rq->req_result = err;
1869
1870 test_pr_info("%s: request %d completed, err=%d",
1871 __func__, test_rq->req_id, err);
1872 mbtd->bkops_stage = BKOPS_STAGE_2;
1873 wake_up(&mbtd->bkops_wait_q);
1874
1875}
1876
1877static int prepare_bkops(struct test_data *td)
1878{
1879 int ret = 0;
1880 struct request_queue *q = td->req_q;
1881 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
1882 struct mmc_card *card = mq->card;
1883 struct mmc_bkops_stats *bkops_stat;
1884
1885 if (!card)
1886 return -EINVAL;
1887
1888 bkops_stat = &card->bkops_info.bkops_stats;
1889
1890 if (!card->ext_csd.bkops_en) {
1891 test_pr_err("%s: BKOPS is not enabled by card or host)",
1892 __func__);
1893 return -ENOTSUPP;
1894 }
1895 if (mmc_card_doing_bkops(card)) {
1896 test_pr_err("%s: BKOPS in progress, try later", __func__);
1897 return -EAGAIN;
1898 }
1899
1900 mmc_blk_init_bkops_statistics(card);
1901
1902 if ((mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2) ||
1903 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2_TWO_REQS) ||
1904 (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_3))
1905 mq->err_check_fn = test_err_check;
1906 mbtd->err_check_counter = 0;
1907
1908 return ret;
1909}
1910
/*
 * Execute the BKOPS test case selected in mbtd->test_info.testcase.
 *
 * Each case simulates a BKOPS need level on the card by writing
 * ext_csd.raw_bkops_status (with ignore_card_bkops_status set so the
 * simulated value is used), runs the queue, and where relevant queues
 * write requests whose end_io callbacks advance mbtd->bkops_stage so
 * this function can wait for the expected stage before finishing.
 *
 * Returns 0 on success, -EINVAL when no card is present, a request
 * could not be queued, or the test case is unknown.
 */
static int run_bkops(struct test_data *td)
{
	int ret = 0;
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		return -EINVAL;

	bkops_stat = &card->bkops_info.bkops_stats;

	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* simulate level 1: BKOPS should start via delayed work */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		/* this long sleep makes sure the host starts bkops and
		   also, gets into suspend */
		msleep(10000);

		bkops_stat->ignore_card_bkops_status = false;
		card->ext_csd.raw_bkops_status = 0;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* simulate level 1, then interrupt the BKOPS with a write
		 * (expected to trigger HPI) */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		msleep(card->bkops_info.delay_ms);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				      td->start_sector,
				      TEST_REQUEST_NUM_OF_BIOS,
				      TEST_PATTERN_5A,
				      bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		/* bkops_end_io_final_fn advances the stage to 4 */
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_CANCEL_DELAYED_WORK:
		/* queue a write immediately so the delayed BKOPS work is
		 * cancelled before it runs */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
		/* simulate urgent level 2 or 3: the first write's error
		 * check starts BKOPS, the second completes the test */
		bkops_stat->ignore_card_bkops_status = true;
		if (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2)
			card->ext_csd.raw_bkops_status = 2;
		else
			card->ext_csd.raw_bkops_status = 3;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		/* bkops_end_io_fn advances the stage to 2 */
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();
		break;

	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* as URGENT_LEVEL_2 but with two pending writes and write
		 * packing disabled */
		mq->wr_packing_enabled = false;
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 2;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				NULL);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
				    __func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);

		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);

		bkops_stat->ignore_card_bkops_status = false;
		test_iosched_mark_test_completion();

		break;
	default:
		test_pr_err("%s: wrong testcase: %d", __func__,
			    mbtd->test_info.testcase);
		ret = -EINVAL;
	}
	return ret;
}
2106
Lee Susmanb09c0412012-12-19 14:28:52 +02002107/*
2108 * new_req_post_test() - Do post test operations for
2109 * new_req_notification test: disable the statistics and clear
2110 * the feature flags.
2111 * @td The test_data for the new_req test that has
2112 * ended.
2113 */
2114static int new_req_post_test(struct test_data *td)
2115{
2116 struct mmc_queue *mq;
2117
2118 if (!td || !td->req_q)
2119 goto exit;
2120
2121 mq = (struct mmc_queue *)td->req_q->queuedata;
2122
2123 if (!mq || !mq->card)
2124 goto exit;
2125
2126 /* disable async_event test stats */
2127 mq->card->async_event_stats.enabled = false;
2128 mmc_print_async_event_stats(mq->card);
2129 test_pr_info("Completed %d requests",
2130 mbtd->completed_req_count);
2131
2132exit:
2133 return 0;
2134}
2135
2136/*
2137 * check_new_req_result() - Print out the number of completed
2138 * requests. Assigned to the check_test_result_fn pointer,
2139 * therefore the name.
2140 * @td The test_data for the new_req test that has
2141 * ended.
2142 */
2143static int check_new_req_result(struct test_data *td)
2144{
2145 test_pr_info("%s: Test results: Completed %d requests",
2146 __func__, mbtd->completed_req_count);
2147 return 0;
2148}
2149
2150/*
2151 * new_req_free_end_io_fn() - Remove request from queuelist and
2152 * free request's allocated memory. Used as a call-back
2153 * assigned to end_io member in request struct.
2154 * @rq The request to be freed
2155 * @err Unused
2156 */
2157static void new_req_free_end_io_fn(struct request *rq, int err)
2158{
2159 struct test_request *test_rq =
2160 (struct test_request *)rq->elv.priv[0];
2161 struct test_data *ptd = test_get_test_data();
2162
2163 BUG_ON(!test_rq);
2164
2165 spin_lock_irq(&ptd->lock);
2166 list_del_init(&test_rq->queuelist);
2167 ptd->dispatched_count--;
2168 spin_unlock_irq(&ptd->lock);
2169
2170 __blk_put_request(ptd->req_q, test_rq->rq);
2171 kfree(test_rq->bios_buffer);
2172 kfree(test_rq);
2173 mbtd->completed_req_count++;
2174}
2175
2176static int prepare_new_req(struct test_data *td)
2177{
2178 struct request_queue *q = td->req_q;
2179 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
2180
2181 mmc_blk_init_packed_statistics(mq->card);
2182 mmc_blk_init_async_event_statistics(mq->card);
2183
2184 mbtd->completed_req_count = 0;
2185
2186 return 0;
2187}
2188
2189static int test_new_req_notification(struct test_data *ptd)
2190{
2191 int ret = 0;
2192 int i;
2193 unsigned int requests_count = 2;
2194 unsigned int bio_num;
2195 struct test_request *test_rq = NULL;
2196
2197 while (1) {
2198 for (i = 0; i < requests_count; i++) {
2199 bio_num = TEST_MAX_BIOS_PER_REQ;
2200 test_rq = test_iosched_create_test_req(0, READ,
2201 ptd->start_sector,
2202 bio_num, TEST_PATTERN_5A,
2203 new_req_free_end_io_fn);
2204 if (test_rq) {
2205 spin_lock_irq(ptd->req_q->queue_lock);
2206 list_add_tail(&test_rq->queuelist,
2207 &ptd->test_queue);
2208 ptd->test_count++;
2209 spin_unlock_irq(ptd->req_q->queue_lock);
2210 } else {
2211 test_pr_err("%s: failed to create read request",
2212 __func__);
2213 ret = -ENODEV;
2214 break;
2215 }
2216 }
2217
2218 __blk_run_queue(ptd->req_q);
2219 /* wait while a mmc layer will send all requests in test_queue*/
2220 while (!list_empty(&ptd->test_queue))
2221 msleep(NEW_REQ_TEST_SLEEP_TIME);
2222
2223 /* test finish criteria */
2224 if (mbtd->completed_req_count > 1000) {
2225 if (ptd->dispatched_count)
2226 continue;
2227 else
2228 break;
2229 }
2230
2231 for (i = 0; i < requests_count; i++) {
2232 bio_num = NEW_REQ_TEST_NUM_BIOS;
2233 test_rq = test_iosched_create_test_req(0, READ,
2234 ptd->start_sector,
2235 bio_num, TEST_PATTERN_5A,
2236 new_req_free_end_io_fn);
2237 if (test_rq) {
2238 spin_lock_irq(ptd->req_q->queue_lock);
2239 list_add_tail(&test_rq->queuelist,
2240 &ptd->test_queue);
2241 ptd->test_count++;
2242 spin_unlock_irq(ptd->req_q->queue_lock);
2243 } else {
2244 test_pr_err("%s: failed to create read request",
2245 __func__);
2246 ret = -ENODEV;
2247 break;
2248 }
2249 }
2250 __blk_run_queue(ptd->req_q);
2251 }
2252
2253 test_iosched_mark_test_completion();
2254 test_pr_info("%s: EXIT: %d code", __func__, ret);
2255
2256 return ret;
2257}
2258
2259static int run_new_req(struct test_data *td)
2260{
2261 int ret = 0;
2262 struct request_queue *q = td->req_q;
2263 struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
2264
2265 mmc_blk_init_async_event_statistics(mq->card);
2266 ret = test_new_req_notification(td);
2267
2268 return ret;
2269}
2270
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002271static bool message_repeat;
2272static int test_open(struct inode *inode, struct file *file)
2273{
2274 file->private_data = inode->i_private;
2275 message_repeat = 1;
2276 return 0;
2277}
2278
2279/* send_packing TEST */
2280static ssize_t send_write_packing_test_write(struct file *file,
2281 const char __user *buf,
2282 size_t count,
2283 loff_t *ppos)
2284{
2285 int ret = 0;
2286 int i = 0;
2287 int number = -1;
2288 int j = 0;
2289
2290 test_pr_info("%s: -- send_write_packing TEST --", __func__);
2291
2292 sscanf(buf, "%d", &number);
2293
2294 if (number <= 0)
2295 number = 1;
2296
2297
2298 mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
2299
2300 if (validate_packed_commands_settings())
2301 return count;
2302
2303 if (mbtd->random_test_seed > 0)
2304 test_pr_info("%s: Test seed: %d", __func__,
2305 mbtd->random_test_seed);
2306
2307 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2308
2309 mbtd->test_info.data = mbtd;
2310 mbtd->test_info.prepare_test_fn = prepare_test;
Maya Ereza12d1d22013-01-10 23:35:42 +02002311 mbtd->test_info.run_test_fn = run_packed_test;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002312 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2313 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2314 mbtd->test_info.post_test_fn = post_test;
2315
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002316 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002317 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2318 test_pr_info("%s: ====================", __func__);
2319
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002320 for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
2321 j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002322
2323 mbtd->test_info.testcase = j;
2324 mbtd->is_random = RANDOM_TEST;
2325 ret = test_iosched_start_test(&mbtd->test_info);
2326 if (ret)
2327 break;
2328 /* Allow FS requests to be dispatched */
2329 msleep(1000);
2330 mbtd->test_info.testcase = j;
2331 mbtd->is_random = NON_RANDOM_TEST;
2332 ret = test_iosched_start_test(&mbtd->test_info);
2333 if (ret)
2334 break;
2335 /* Allow FS requests to be dispatched */
2336 msleep(1000);
2337 }
2338 }
2339
2340 test_pr_info("%s: Completed all the test cases.", __func__);
2341
2342 return count;
2343}
2344
2345static ssize_t send_write_packing_test_read(struct file *file,
2346 char __user *buffer,
2347 size_t count,
2348 loff_t *offset)
2349{
2350 memset((void *)buffer, 0, count);
2351
2352 snprintf(buffer, count,
2353 "\nsend_write_packing_test\n"
2354 "=========\n"
2355 "Description:\n"
2356 "This test checks the following scenarios\n"
2357 "- Pack due to FLUSH message\n"
2358 "- Pack due to FLUSH after threshold writes\n"
2359 "- Pack due to READ message\n"
2360 "- Pack due to READ after threshold writes\n"
2361 "- Pack due to empty queue\n"
2362 "- Pack due to threshold writes\n"
2363 "- Pack due to one over threshold writes\n");
2364
2365 if (message_repeat == 1) {
2366 message_repeat = 0;
2367 return strnlen(buffer, count);
2368 } else {
2369 return 0;
2370 }
2371}
2372
/* debugfs file operations for the send_write_packing test node */
const struct file_operations send_write_packing_test_ops = {
	.open = test_open,
	.write = send_write_packing_test_write,
	.read = send_write_packing_test_read,
};
2378
2379/* err_check TEST */
2380static ssize_t err_check_test_write(struct file *file,
2381 const char __user *buf,
2382 size_t count,
2383 loff_t *ppos)
2384{
2385 int ret = 0;
2386 int i = 0;
2387 int number = -1;
2388 int j = 0;
2389
2390 test_pr_info("%s: -- err_check TEST --", __func__);
2391
2392 sscanf(buf, "%d", &number);
2393
2394 if (number <= 0)
2395 number = 1;
2396
2397 mbtd->test_group = TEST_ERR_CHECK_GROUP;
2398
2399 if (validate_packed_commands_settings())
2400 return count;
2401
2402 if (mbtd->random_test_seed > 0)
2403 test_pr_info("%s: Test seed: %d", __func__,
2404 mbtd->random_test_seed);
2405
2406 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2407
2408 mbtd->test_info.data = mbtd;
2409 mbtd->test_info.prepare_test_fn = prepare_test;
Maya Ereza12d1d22013-01-10 23:35:42 +02002410 mbtd->test_info.run_test_fn = run_packed_test;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002411 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2412 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2413 mbtd->test_info.post_test_fn = post_test;
2414
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002415 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002416 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2417 test_pr_info("%s: ====================", __func__);
2418
2419 for (j = ERR_CHECK_MIN_TESTCASE;
2420 j <= ERR_CHECK_MAX_TESTCASE ; j++) {
2421 mbtd->test_info.testcase = j;
2422 mbtd->is_random = RANDOM_TEST;
2423 ret = test_iosched_start_test(&mbtd->test_info);
2424 if (ret)
2425 break;
2426 /* Allow FS requests to be dispatched */
2427 msleep(1000);
2428 mbtd->test_info.testcase = j;
2429 mbtd->is_random = NON_RANDOM_TEST;
2430 ret = test_iosched_start_test(&mbtd->test_info);
2431 if (ret)
2432 break;
2433 /* Allow FS requests to be dispatched */
2434 msleep(1000);
2435 }
2436 }
2437
2438 test_pr_info("%s: Completed all the test cases.", __func__);
2439
2440 return count;
2441}
2442
2443static ssize_t err_check_test_read(struct file *file,
2444 char __user *buffer,
2445 size_t count,
2446 loff_t *offset)
2447{
2448 memset((void *)buffer, 0, count);
2449
2450 snprintf(buffer, count,
2451 "\nerr_check_TEST\n"
2452 "=========\n"
2453 "Description:\n"
2454 "This test checks the following scenarios\n"
2455 "- Return ABORT\n"
2456 "- Return PARTIAL followed by success\n"
2457 "- Return PARTIAL followed by abort\n"
2458 "- Return PARTIAL multiple times until success\n"
2459 "- Return PARTIAL with fail index = threshold\n"
2460 "- Return RETRY\n"
2461 "- Return CMD_ERR\n"
2462 "- Return DATA_ERR\n");
2463
2464 if (message_repeat == 1) {
2465 message_repeat = 0;
2466 return strnlen(buffer, count);
2467 } else {
2468 return 0;
2469 }
2470}
2471
/* debugfs file operations for the err_check test node */
const struct file_operations err_check_test_ops = {
	.open = test_open,
	.write = err_check_test_write,
	.read = err_check_test_read,
};
2477
2478/* send_invalid_packed TEST */
2479static ssize_t send_invalid_packed_test_write(struct file *file,
2480 const char __user *buf,
2481 size_t count,
2482 loff_t *ppos)
2483{
2484 int ret = 0;
2485 int i = 0;
2486 int number = -1;
2487 int j = 0;
2488 int num_of_failures = 0;
2489
2490 test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
2491
2492 sscanf(buf, "%d", &number);
2493
2494 if (number <= 0)
2495 number = 1;
2496
2497 mbtd->test_group = TEST_SEND_INVALID_GROUP;
2498
2499 if (validate_packed_commands_settings())
2500 return count;
2501
2502 if (mbtd->random_test_seed > 0)
2503 test_pr_info("%s: Test seed: %d", __func__,
2504 mbtd->random_test_seed);
2505
2506 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2507
2508 mbtd->test_info.data = mbtd;
2509 mbtd->test_info.prepare_test_fn = prepare_test;
Maya Ereza12d1d22013-01-10 23:35:42 +02002510 mbtd->test_info.run_test_fn = run_packed_test;
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002511 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2512 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2513 mbtd->test_info.post_test_fn = post_test;
2514
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002515 for (i = 0; i < number; ++i) {
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02002516 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2517 test_pr_info("%s: ====================", __func__);
2518
2519 for (j = INVALID_CMD_MIN_TESTCASE;
2520 j <= INVALID_CMD_MAX_TESTCASE ; j++) {
2521
2522 mbtd->test_info.testcase = j;
2523 mbtd->is_random = RANDOM_TEST;
2524 ret = test_iosched_start_test(&mbtd->test_info);
2525 if (ret)
2526 num_of_failures++;
2527 /* Allow FS requests to be dispatched */
2528 msleep(1000);
2529
2530 mbtd->test_info.testcase = j;
2531 mbtd->is_random = NON_RANDOM_TEST;
2532 ret = test_iosched_start_test(&mbtd->test_info);
2533 if (ret)
2534 num_of_failures++;
2535 /* Allow FS requests to be dispatched */
2536 msleep(1000);
2537 }
2538 }
2539
2540 test_pr_info("%s: Completed all the test cases.", __func__);
2541
2542 if (num_of_failures > 0) {
2543 test_iosched_set_test_result(TEST_FAILED);
2544 test_pr_err(
2545 "There were %d failures during the test, TEST FAILED",
2546 num_of_failures);
2547 }
2548 return count;
2549}
2550
2551static ssize_t send_invalid_packed_test_read(struct file *file,
2552 char __user *buffer,
2553 size_t count,
2554 loff_t *offset)
2555{
2556 memset((void *)buffer, 0, count);
2557
2558 snprintf(buffer, count,
2559 "\nsend_invalid_packed_TEST\n"
2560 "=========\n"
2561 "Description:\n"
2562 "This test checks the following scenarios\n"
2563 "- Send an invalid header version\n"
2564 "- Send the wrong write code\n"
2565 "- Send an invalid R/W code\n"
2566 "- Send wrong start address in header\n"
2567 "- Send header with block_count smaller than actual\n"
2568 "- Send header with block_count larger than actual\n"
2569 "- Send header CMD23 packed bit set\n"
2570 "- Send CMD23 with block count over threshold\n"
2571 "- Send CMD23 with block_count equals zero\n"
2572 "- Send CMD23 packed bit unset\n"
2573 "- Send CMD23 reliable write bit set\n"
2574 "- Send CMD23 bits [16-29] set\n"
2575 "- Send CMD23 header block not in block_count\n");
2576
2577 if (message_repeat == 1) {
2578 message_repeat = 0;
2579 return strnlen(buffer, count);
2580 } else {
2581 return 0;
2582 }
2583}
2584
/* debugfs file operations for the send_invalid_packed test node */
const struct file_operations send_invalid_packed_test_ops = {
	.open = test_open,
	.write = send_invalid_packed_test_write,
	.read = send_invalid_packed_test_read,
};
2590
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002591/* packing_control TEST */
2592static ssize_t write_packing_control_test_write(struct file *file,
2593 const char __user *buf,
2594 size_t count,
2595 loff_t *ppos)
2596{
2597 int ret = 0;
2598 int i = 0;
2599 int number = -1;
2600 int j = 0;
2601 struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
2602 int max_num_requests = mq->card->ext_csd.max_packed_writes;
2603 int test_successful = 1;
2604
2605 test_pr_info("%s: -- write_packing_control TEST --", __func__);
2606
2607 sscanf(buf, "%d", &number);
2608
2609 if (number <= 0)
2610 number = 1;
2611
2612 test_pr_info("%s: max_num_requests = %d ", __func__,
2613 max_num_requests);
2614
2615 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2616 mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
2617
2618 if (validate_packed_commands_settings())
2619 return count;
2620
2621 mbtd->test_info.data = mbtd;
2622 mbtd->test_info.prepare_test_fn = prepare_test;
Maya Ereza12d1d22013-01-10 23:35:42 +02002623 mbtd->test_info.run_test_fn = run_packed_test;
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02002624 mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
2625 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2626
2627 for (i = 0; i < number; ++i) {
2628 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2629 test_pr_info("%s: ====================", __func__);
2630
2631 for (j = PACKING_CONTROL_MIN_TESTCASE;
2632 j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
2633
2634 test_successful = 1;
2635 mbtd->test_info.testcase = j;
2636 mbtd->is_random = RANDOM_TEST;
2637 ret = test_iosched_start_test(&mbtd->test_info);
2638 if (ret) {
2639 test_successful = 0;
2640 break;
2641 }
2642 /* Allow FS requests to be dispatched */
2643 msleep(1000);
2644
2645 mbtd->test_info.testcase = j;
2646 mbtd->is_random = NON_RANDOM_TEST;
2647 ret = test_iosched_start_test(&mbtd->test_info);
2648 if (ret) {
2649 test_successful = 0;
2650 break;
2651 }
2652 /* Allow FS requests to be dispatched */
2653 msleep(1000);
2654 }
2655
2656 if (!test_successful)
2657 break;
2658 }
2659
2660 test_pr_info("%s: Completed all the test cases.", __func__);
2661
2662 return count;
2663}
2664
2665static ssize_t write_packing_control_test_read(struct file *file,
2666 char __user *buffer,
2667 size_t count,
2668 loff_t *offset)
2669{
2670 memset((void *)buffer, 0, count);
2671
2672 snprintf(buffer, count,
2673 "\nwrite_packing_control_test\n"
2674 "=========\n"
2675 "Description:\n"
2676 "This test checks the following scenarios\n"
2677 "- Packing expected - one over trigger\n"
2678 "- Packing expected - N over trigger\n"
2679 "- Packing expected - N over trigger followed by read\n"
2680 "- Packing expected - N over trigger followed by flush\n"
2681 "- Packing expected - threshold over trigger FB by flush\n"
2682 "- Packing not expected - less than trigger\n"
2683 "- Packing not expected - trigger requests\n"
2684 "- Packing not expected - trigger, read, trigger\n"
2685 "- Mixed state - packing -> no packing -> packing\n"
2686 "- Mixed state - no packing -> packing -> no packing\n");
2687
2688 if (message_repeat == 1) {
2689 message_repeat = 0;
2690 return strnlen(buffer, count);
2691 } else {
2692 return 0;
2693 }
2694}
2695
/* debugfs file operations for the write_packing_control test node */
const struct file_operations write_packing_control_test_ops = {
	.open = test_open,
	.write = write_packing_control_test_write,
	.read = write_packing_control_test_read,
};
2701
Maya Erezddc55732012-10-17 09:51:01 +02002702static ssize_t write_discard_sanitize_test_write(struct file *file,
2703 const char __user *buf,
2704 size_t count,
2705 loff_t *ppos)
2706{
2707 int ret = 0;
2708 int i = 0;
2709 int number = -1;
2710
2711 sscanf(buf, "%d", &number);
2712 if (number <= 0)
2713 number = 1;
2714
2715 test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);
2716
2717 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2718
2719 mbtd->test_group = TEST_GENERAL_GROUP;
2720
2721 mbtd->test_info.data = mbtd;
2722 mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
2723 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2724 mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;
2725
2726 for (i = 0 ; i < number ; ++i) {
2727 test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
2728 test_pr_info("%s: ===================", __func__);
2729
2730 mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
2731 ret = test_iosched_start_test(&mbtd->test_info);
2732
2733 if (ret)
2734 break;
2735 }
2736
2737 return count;
2738}
2739
/* debugfs file operations for the write_discard_sanitize test node
 * (write-only: no .read handler) */
const struct file_operations write_discard_sanitize_test_ops = {
	.open = test_open,
	.write = write_discard_sanitize_test_write,
};
2744
/* Debugfs write handler that runs the BKOPS test group.
 * Writing a number N runs N cycles; each cycle executes every BKOPS
 * test case from BKOPS_MIN_TESTCASE to BKOPS_MAX_TESTCASE in order.
 * Always returns @count so userspace sees the whole write consumed.
 */
static ssize_t bkops_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0, j;
	int number = -1;

	test_pr_info("%s: -- bkops_test TEST --", __func__);

	/* NOTE(review): buf is a __user pointer parsed directly with
	 * sscanf (no copy_from_user); confirm this is acceptable for
	 * this debugfs interface. Return value is also unchecked — on
	 * parse failure number stays -1 and is defaulted below. */
	sscanf(buf, "%d", &number);

	/* default to a single cycle on missing/invalid input */
	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_BKOPS_GROUP;

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_bkops;
	mbtd->test_info.check_test_result_fn = check_bkops_result;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.run_test_fn = run_bkops;
	mbtd->test_info.timeout_msec = BKOPS_TEST_TIMEOUT;
	mbtd->test_info.post_test_fn = bkops_post_test;

	for (i = 0 ; i < number ; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ===================", __func__);
		for (j = BKOPS_MIN_TESTCASE ;
		      j <= BKOPS_MAX_TESTCASE ; j++) {
			mbtd->test_info.testcase = j;
			ret = test_iosched_start_test(&mbtd->test_info);
			/* NOTE(review): this break exits only the inner
			 * testcase loop; remaining cycles of the outer
			 * loop still run after a failure — confirm this
			 * is intended (sibling handlers stop entirely). */
			if (ret)
				break;
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
2789
2790static ssize_t bkops_test_read(struct file *file,
2791 char __user *buffer,
2792 size_t count,
2793 loff_t *offset)
2794{
2795 memset((void *)buffer, 0, count);
2796
2797 snprintf(buffer, count,
2798 "\nbkops_test\n========================\n"
2799 "Description:\n"
2800 "This test simulates BKOPS status from card\n"
2801 "and verifies that:\n"
2802 " - Starting BKOPS delayed work, level 1\n"
2803 " - Starting BKOPS delayed work, level 1, with HPI\n"
2804 " - Cancel starting BKOPS delayed work, "
2805 " when a request is received\n"
2806 " - Starting BKOPS urgent, level 2,3\n"
2807 " - Starting BKOPS urgent with 2 requests\n");
2808 return strnlen(buffer, count);
2809}
2810
/* debugfs file operations for the bkops test node: write runs the
 * test group, read prints the test description. */
const struct file_operations bkops_test_ops = {
	.open = test_open,
	.write = bkops_test_write,
	.read = bkops_test_read,
};
2816
Lee Susmanf18263a2012-10-24 14:14:37 +02002817static ssize_t long_sequential_read_test_write(struct file *file,
2818 const char __user *buf,
2819 size_t count,
2820 loff_t *ppos)
2821{
2822 int ret = 0;
2823 int i = 0;
2824 int number = -1;
2825 unsigned int mtime, integer, fraction;
2826
2827 test_pr_info("%s: -- Long Sequential Read TEST --", __func__);
2828
2829 sscanf(buf, "%d", &number);
2830
2831 if (number <= 0)
2832 number = 1;
2833
2834 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2835 mbtd->test_group = TEST_GENERAL_GROUP;
2836
2837 mbtd->test_info.data = mbtd;
2838 mbtd->test_info.prepare_test_fn = prepare_test;
2839 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2840
2841 for (i = 0 ; i < number ; ++i) {
2842 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2843 test_pr_info("%s: ====================", __func__);
2844
2845 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_READ;
2846 mbtd->is_random = NON_RANDOM_TEST;
2847 ret = test_iosched_start_test(&mbtd->test_info);
2848 if (ret)
2849 break;
2850
2851 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2852
2853 test_pr_info("%s: time is %u msec, size is %u.%u MiB",
2854 __func__, mtime, LONG_TEST_SIZE_INTEGER,
2855 LONG_TEST_SIZE_FRACTION);
2856
2857 /* we first multiply in order not to lose precision */
2858 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2859 /* divide values to get a MiB/sec integer value with one
2860 digit of precision. Multiply by 10 for one digit precision
2861 */
2862 fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
2863 integer /= 10;
2864 /* and calculate the MiB value fraction */
2865 fraction -= integer * 10;
2866
2867 test_pr_info("%s: Throughput: %u.%u MiB/sec\n"
2868 , __func__, integer, fraction);
2869
2870 /* Allow FS requests to be dispatched */
2871 msleep(1000);
2872 }
2873
2874 return count;
2875}
2876
2877static ssize_t long_sequential_read_test_read(struct file *file,
2878 char __user *buffer,
2879 size_t count,
2880 loff_t *offset)
2881{
2882 memset((void *)buffer, 0, count);
2883
2884 snprintf(buffer, count,
2885 "\nlong_sequential_read_test\n"
2886 "=========\n"
2887 "Description:\n"
2888 "This test runs the following scenarios\n"
2889 "- Long Sequential Read Test: this test measures read "
2890 "throughput at the driver level by sequentially reading many "
2891 "large requests.\n");
2892
2893 if (message_repeat == 1) {
2894 message_repeat = 0;
2895 return strnlen(buffer, count);
2896 } else
2897 return 0;
2898}
2899
/* debugfs file operations for the long sequential read test node */
const struct file_operations long_sequential_read_test_ops = {
	.open = test_open,
	.write = long_sequential_read_test_write,
	.read = long_sequential_read_test_read,
};
2905
Lee Susmana35ae6e2012-10-25 16:06:07 +02002906static ssize_t long_sequential_write_test_write(struct file *file,
2907 const char __user *buf,
2908 size_t count,
2909 loff_t *ppos)
2910{
2911 int ret = 0;
2912 int i = 0;
2913 int number = -1;
2914 unsigned int mtime, integer, fraction;
2915
2916 test_pr_info("%s: -- Long Sequential Write TEST --", __func__);
2917
2918 sscanf(buf, "%d", &number);
2919
2920 if (number <= 0)
2921 number = 1;
2922
2923 memset(&mbtd->test_info, 0, sizeof(struct test_info));
2924 mbtd->test_group = TEST_GENERAL_GROUP;
2925
2926 mbtd->test_info.data = mbtd;
2927 mbtd->test_info.prepare_test_fn = prepare_test;
2928 mbtd->test_info.get_test_case_str_fn = get_test_case_str;
2929
2930 for (i = 0 ; i < number ; ++i) {
2931 test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
2932 test_pr_info("%s: ====================", __func__);
2933
2934 mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_WRITE;
2935 mbtd->is_random = NON_RANDOM_TEST;
2936 ret = test_iosched_start_test(&mbtd->test_info);
2937 if (ret)
2938 break;
2939
2940 mtime = jiffies_to_msecs(mbtd->test_info.test_duration);
2941
2942 test_pr_info("%s: time is %u msec, size is %u.%u MiB",
2943 __func__, mtime, LONG_TEST_SIZE_INTEGER,
2944 LONG_TEST_SIZE_FRACTION);
2945
2946 /* we first multiply in order not to lose precision */
2947 mtime *= MB_MSEC_RATIO_APPROXIMATION;
2948 /* divide values to get a MiB/sec integer value with one
2949 digit of precision
2950 */
2951 fraction = integer = (LONG_TEST_ACTUAL_BYTE_NUM * 10) / mtime;
2952 integer /= 10;
2953 /* and calculate the MiB value fraction */
2954 fraction -= integer * 10;
2955
2956 test_pr_info("%s: Throughput: %u.%u MiB/sec\n",
2957 __func__, integer, fraction);
2958
2959 /* Allow FS requests to be dispatched */
2960 msleep(1000);
2961 }
2962
2963 return count;
2964}
2965
2966static ssize_t long_sequential_write_test_read(struct file *file,
2967 char __user *buffer,
2968 size_t count,
2969 loff_t *offset)
2970{
2971 memset((void *)buffer, 0, count);
2972
2973 snprintf(buffer, count,
2974 "\nlong_sequential_write_test\n"
2975 "=========\n"
2976 "Description:\n"
2977 "This test runs the following scenarios\n"
2978 "- Long Sequential Write Test: this test measures write "
2979 "throughput at the driver level by sequentially writing many "
2980 "large requests\n");
2981
2982 if (message_repeat == 1) {
2983 message_repeat = 0;
2984 return strnlen(buffer, count);
2985 } else
2986 return 0;
2987}
2988
/* debugfs file operations for the long sequential write test node */
const struct file_operations long_sequential_write_test_ops = {
	.open = test_open,
	.write = long_sequential_write_test_write,
	.read = long_sequential_write_test_read,
};
2994
/* Debugfs write handler that runs the new-request-notification test.
 * Writing a number N runs TEST_NEW_REQ_NOTIFICATION N times; the loop
 * stops on the first failure.
 * Always returns @count so userspace sees the whole write consumed.
 */
static ssize_t new_req_notification_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;

	test_pr_info("%s: -- new_req_notification TEST --", __func__);

	sscanf(buf, "%d", &number);

	/* default to a single cycle on missing/invalid input */
	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_NEW_NOTIFICATION_GROUP;

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_new_req;
	mbtd->test_info.check_test_result_fn = check_new_req_result;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.run_test_fn = run_new_req;
	/* 10 min — the old comment said "1 min" but the value is
	 * 10 * 60 * 1000 msec; confirm which was intended */
	mbtd->test_info.timeout_msec = 10 * 60 * 1000; /* 10 min */
	mbtd->test_info.post_test_fn = new_req_post_test;

	for (i = 0 ; i < number ; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ===================", __func__);
		test_pr_info("%s: start test case TEST_NEW_REQ_NOTIFICATION",
			      __func__);
		mbtd->test_info.testcase = TEST_NEW_REQ_NOTIFICATION;
		ret = test_iosched_start_test(&mbtd->test_info);
		if (ret) {
			test_pr_info("%s: break from new_req tests loop",
				      __func__);
			break;
		}
	}
	return count;
}
3038
3039static ssize_t new_req_notification_test_read(struct file *file,
3040 char __user *buffer,
3041 size_t count,
3042 loff_t *offset)
3043{
3044 memset((void *)buffer, 0, count);
3045
3046 snprintf(buffer, count,
3047 "\nnew_req_notification_test\n========================\n"
3048 "Description:\n"
3049 "This test checks following scenarious\n"
3050 "- new request arrives after a NULL request was sent to the "
3051 "mmc_queue,\n"
3052 "which is waiting for completion of a former request\n");
3053
3054 return strnlen(buffer, count);
3055}
3056
/* debugfs file operations for the new_req_notification test node */
const struct file_operations new_req_notification_test_ops = {
	.open = test_open,
	.write = new_req_notification_test_write,
	.read = new_req_notification_test_read,
};
Lee Susmana35ae6e2012-10-25 16:06:07 +02003062
/* Remove every debugfs entry created by mmc_block_test_debugfs_init().
 * debugfs_remove() is a no-op for NULL dentries, so entries that were
 * never created (e.g. after a partial init failure) are safe to pass.
 */
static void mmc_block_test_debugfs_cleanup(void)
{
	debugfs_remove(mbtd->debug.random_test_seed);
	debugfs_remove(mbtd->debug.send_write_packing_test);
	debugfs_remove(mbtd->debug.err_check_test);
	debugfs_remove(mbtd->debug.send_invalid_packed_test);
	debugfs_remove(mbtd->debug.packing_control_test);
	debugfs_remove(mbtd->debug.discard_sanitize_test);
	debugfs_remove(mbtd->debug.bkops_test);
	debugfs_remove(mbtd->debug.long_sequential_read_test);
	debugfs_remove(mbtd->debug.long_sequential_write_test);
	debugfs_remove(mbtd->debug.new_req_notification_test);
}
3076
3077static int mmc_block_test_debugfs_init(void)
3078{
3079 struct dentry *utils_root, *tests_root;
3080
3081 utils_root = test_iosched_get_debugfs_utils_root();
3082 tests_root = test_iosched_get_debugfs_tests_root();
3083
3084 if (!utils_root || !tests_root)
3085 return -EINVAL;
3086
3087 mbtd->debug.random_test_seed = debugfs_create_u32(
3088 "random_test_seed",
3089 S_IRUGO | S_IWUGO,
3090 utils_root,
3091 &mbtd->random_test_seed);
3092
3093 if (!mbtd->debug.random_test_seed)
3094 goto err_nomem;
3095
3096 mbtd->debug.send_write_packing_test =
3097 debugfs_create_file("send_write_packing_test",
3098 S_IRUGO | S_IWUGO,
3099 tests_root,
3100 NULL,
3101 &send_write_packing_test_ops);
3102
3103 if (!mbtd->debug.send_write_packing_test)
3104 goto err_nomem;
3105
3106 mbtd->debug.err_check_test =
3107 debugfs_create_file("err_check_test",
3108 S_IRUGO | S_IWUGO,
3109 tests_root,
3110 NULL,
3111 &err_check_test_ops);
3112
3113 if (!mbtd->debug.err_check_test)
3114 goto err_nomem;
3115
3116 mbtd->debug.send_invalid_packed_test =
3117 debugfs_create_file("send_invalid_packed_test",
3118 S_IRUGO | S_IWUGO,
3119 tests_root,
3120 NULL,
3121 &send_invalid_packed_test_ops);
3122
3123 if (!mbtd->debug.send_invalid_packed_test)
3124 goto err_nomem;
3125
Tatyana Brokhman91e1e322012-10-09 13:53:43 +02003126 mbtd->debug.packing_control_test = debugfs_create_file(
3127 "packing_control_test",
3128 S_IRUGO | S_IWUGO,
3129 tests_root,
3130 NULL,
3131 &write_packing_control_test_ops);
3132
3133 if (!mbtd->debug.packing_control_test)
3134 goto err_nomem;
3135
Maya Erezddc55732012-10-17 09:51:01 +02003136 mbtd->debug.discard_sanitize_test =
3137 debugfs_create_file("write_discard_sanitize_test",
3138 S_IRUGO | S_IWUGO,
3139 tests_root,
3140 NULL,
3141 &write_discard_sanitize_test_ops);
3142 if (!mbtd->debug.discard_sanitize_test) {
3143 mmc_block_test_debugfs_cleanup();
3144 return -ENOMEM;
3145 }
3146
Yaniv Gardie9214c82012-10-18 13:58:18 +02003147 mbtd->debug.bkops_test =
3148 debugfs_create_file("bkops_test",
3149 S_IRUGO | S_IWUGO,
3150 tests_root,
3151 NULL,
3152 &bkops_test_ops);
3153
Lee Susmanb09c0412012-12-19 14:28:52 +02003154 mbtd->debug.new_req_notification_test =
3155 debugfs_create_file("new_req_notification_test",
3156 S_IRUGO | S_IWUGO,
3157 tests_root,
3158 NULL,
3159 &new_req_notification_test_ops);
3160
3161 if (!mbtd->debug.new_req_notification_test)
3162 goto err_nomem;
3163
Yaniv Gardie9214c82012-10-18 13:58:18 +02003164 if (!mbtd->debug.bkops_test)
3165 goto err_nomem;
3166
Lee Susmanf18263a2012-10-24 14:14:37 +02003167 mbtd->debug.long_sequential_read_test = debugfs_create_file(
3168 "long_sequential_read_test",
3169 S_IRUGO | S_IWUGO,
3170 tests_root,
3171 NULL,
3172 &long_sequential_read_test_ops);
3173
3174 if (!mbtd->debug.long_sequential_read_test)
3175 goto err_nomem;
3176
Lee Susmana35ae6e2012-10-25 16:06:07 +02003177 mbtd->debug.long_sequential_write_test = debugfs_create_file(
3178 "long_sequential_write_test",
3179 S_IRUGO | S_IWUGO,
3180 tests_root,
3181 NULL,
3182 &long_sequential_write_test_ops);
3183
3184 if (!mbtd->debug.long_sequential_write_test)
3185 goto err_nomem;
3186
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02003187 return 0;
3188
3189err_nomem:
3190 mmc_block_test_debugfs_cleanup();
3191 return -ENOMEM;
3192}
3193
3194static void mmc_block_test_probe(void)
3195{
3196 struct request_queue *q = test_iosched_get_req_queue();
3197 struct mmc_queue *mq;
3198 int max_packed_reqs;
3199
3200 if (!q) {
3201 test_pr_err("%s: NULL request queue", __func__);
3202 return;
3203 }
3204
3205 mq = q->queuedata;
3206 if (!mq) {
3207 test_pr_err("%s: NULL mq", __func__);
3208 return;
3209 }
3210
3211 max_packed_reqs = mq->card->ext_csd.max_packed_writes;
3212 mbtd->exp_packed_stats.packing_events =
3213 kzalloc((max_packed_reqs + 1) *
3214 sizeof(*mbtd->exp_packed_stats.packing_events),
3215 GFP_KERNEL);
3216
3217 mmc_block_test_debugfs_init();
3218}
3219
/* test-iosched exit callback: tear down the debugfs test entries */
static void mmc_block_test_remove(void)
{
	mmc_block_test_debugfs_cleanup();
}
3224
3225static int __init mmc_block_test_init(void)
3226{
3227 mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
3228 if (!mbtd) {
3229 test_pr_err("%s: failed to allocate mmc_block_test_data",
3230 __func__);
3231 return -ENODEV;
3232 }
3233
Yaniv Gardie9214c82012-10-18 13:58:18 +02003234 init_waitqueue_head(&mbtd->bkops_wait_q);
Tatyana Brokhman09b010d2012-10-09 13:50:56 +02003235 mbtd->bdt.init_fn = mmc_block_test_probe;
3236 mbtd->bdt.exit_fn = mmc_block_test_remove;
3237 INIT_LIST_HEAD(&mbtd->bdt.list);
3238 test_iosched_register(&mbtd->bdt);
3239
3240 return 0;
3241}
3242
/* Module exit: unregister from test-iosched and free the test data */
static void __exit mmc_block_test_exit(void)
{
	test_iosched_unregister(&mbtd->bdt);
	kfree(mbtd);
}
3248
/* Module entry/exit points and metadata */
module_init(mmc_block_test_init);
module_exit(mmc_block_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MMC block test");