blob: 07b36b880467356eb056d23073827d2c3a7c4fe1 [file] [log] [blame]
Duy Truong790f06d2013-02-13 16:38:12 -08001/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Maya Erez60181552012-06-27 11:25:26 +03002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * The test scheduler allows to test the block device by dispatching
13 * specific requests according to the test case and declare PASS/FAIL
14 * according to the requests completion error code.
15 * Each test is exposed via debugfs and can be triggered by writing to
16 * the debugfs file.
17 *
18 */
19
20/* elevator test iosched */
21#include <linux/blkdev.h>
22#include <linux/elevator.h>
23#include <linux/bio.h>
24#include <linux/module.h>
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/debugfs.h>
28#include <linux/test-iosched.h>
29#include <linux/delay.h>
30#include "blk.h"
31
32#define MODULE_NAME "test-iosched"
33#define WR_RD_START_REQ_ID 1234
34#define UNIQUE_START_REQ_ID 5678
35#define TIMEOUT_TIMER_MS 40000
36#define TEST_MAX_TESTCASE_ROUNDS 15
37
38#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
39#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
40#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
41
42static DEFINE_SPINLOCK(blk_dev_test_list_lock);
43static LIST_HEAD(blk_dev_test_list);
44static struct test_data *ptd;
45
Maya Erez60181552012-06-27 11:25:26 +030046
Maya Erez60181552012-06-27 11:25:26 +030047/**
48 * test_iosched_get_req_queue() - returns the request queue
49 * served by the scheduler
50 */
51struct request_queue *test_iosched_get_req_queue(void)
52{
53 if (!ptd)
54 return NULL;
55
56 return ptd->req_q;
57}
58EXPORT_SYMBOL(test_iosched_get_req_queue);
59
60/**
61 * test_iosched_mark_test_completion() - Wakeup the debugfs
62 * thread, waiting on the test completion
63 */
64void test_iosched_mark_test_completion(void)
65{
66 if (!ptd)
67 return;
Lee Susman1199b4c2012-12-19 14:19:30 +020068 test_pr_info("%s: mark test is completed, test_count=%d,",
69 __func__, ptd->test_count);
70 test_pr_info("%s: reinsert_count=%d, dispatched_count=%d",
71 __func__, ptd->reinsert_count, ptd->dispatched_count);
Maya Erez60181552012-06-27 11:25:26 +030072
73 ptd->test_state = TEST_COMPLETED;
74 wake_up(&ptd->wait_q);
75}
76EXPORT_SYMBOL(test_iosched_mark_test_completion);
77
Lee Susman70160bb2013-01-06 10:57:30 +020078/**
79 * check_test_completion() - Check if all the queued test
80 * requests were completed
81 */
82void check_test_completion(void)
Maya Erez60181552012-06-27 11:25:26 +030083{
84 struct test_request *test_rq;
Maya Erez60181552012-06-27 11:25:26 +030085
Lee Susman1199b4c2012-12-19 14:19:30 +020086 if (!ptd)
Lee Susman70160bb2013-01-06 10:57:30 +020087 goto exit;
Lee Susman1199b4c2012-12-19 14:19:30 +020088
89 list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist)
Maya Erez60181552012-06-27 11:25:26 +030090 if (!test_rq->req_completed)
Lee Susman70160bb2013-01-06 10:57:30 +020091 goto exit;
Lee Susman1199b4c2012-12-19 14:19:30 +020092
93 if (!list_empty(&ptd->test_queue)
94 || !list_empty(&ptd->reinsert_queue)
95 || !list_empty(&ptd->urgent_queue)) {
96 test_pr_info("%s: Test still not completed,", __func__);
97 test_pr_info("%s: test_count=%d, reinsert_count=%d",
98 __func__, ptd->test_count, ptd->reinsert_count);
99 test_pr_info("%s: dispatched_count=%d, urgent_count=%d",
100 __func__, ptd->dispatched_count, ptd->urgent_count);
Lee Susman70160bb2013-01-06 10:57:30 +0200101 goto exit;
Maya Erez60181552012-06-27 11:25:26 +0300102 }
103
Lee Susmanec0b8212013-06-27 11:35:20 +0300104 ptd->test_info.test_duration = ktime_sub(ktime_get(),
105 ptd->test_info.test_duration);
Lee Susmanf18263a2012-10-24 14:14:37 +0200106
Lee Susman1199b4c2012-12-19 14:19:30 +0200107 test_pr_info("%s: Test is completed, test_count=%d, reinsert_count=%d,",
108 __func__, ptd->test_count, ptd->reinsert_count);
109 test_pr_info("%s: dispatched_count=%d",
110 __func__, ptd->dispatched_count);
Maya Erez60181552012-06-27 11:25:26 +0300111
112 test_iosched_mark_test_completion();
Lee Susman70160bb2013-01-06 10:57:30 +0200113
114exit:
115 return;
Maya Erez60181552012-06-27 11:25:26 +0300116}
Lee Susman70160bb2013-01-06 10:57:30 +0200117EXPORT_SYMBOL(check_test_completion);
Maya Erez60181552012-06-27 11:25:26 +0300118
/*
 * A callback to be called per bio completion.
 * Frees the bio memory.
 */
static void end_test_bio(struct bio *bio, int err)
{
        /* A non-zero completion code marks the bio as not up to date */
        if (err)
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_put(bio);
}
129
/*
 * A callback to be called per request completion.
 * The request memory is not freed here; it is freed later, after the test
 * results are checked (see free_test_queue()).
 */
static void end_test_req(struct request *rq, int err)
{
        struct test_request *test_rq;

        /* The test_request was stashed in elv.priv[0] when the rq was created */
        test_rq = (struct test_request *)rq->elv.priv[0];
        BUG_ON(!test_rq);

        test_pr_debug("%s: request %d completed, err=%d",
               __func__, test_rq->req_id, err);

        test_rq->req_completed = true;
        test_rq->req_result = err;

        /* This completion may have been the last one the test was waiting for */
        check_test_completion();
}
150
/**
 * test_iosched_add_unique_test_req() - Create and queue a non
 * read/write request (such as FLUSH/DISCARD/SANITIZE).
 * @is_err_expcted:	A flag to indicate if this request
 *			should succeed or not
 * @req_unique:		The type of request to add
 * @start_sec:		start address of the first bio
 * @nr_sects:		number of sectors in the request
 * @end_req_io:		specific completion callback. When not
 *			set, the default callback will be used
 *
 * On success the request is appended to ptd->test_queue under the
 * queue lock. Returns 0 on success, -ENODEV on any failure.
 */
int test_iosched_add_unique_test_req(int is_err_expcted,
			enum req_unique_type req_unique,
			int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
{
	struct bio *bio;
	struct request *rq;
	int rw_flags;
	struct test_request *test_rq;

	if (!ptd)
		return -ENODEV;

	/* Zero-segment bio: the payload (if any) is described by bi_size/bi_sector */
	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio) {
		test_pr_err("%s: Failed to allocate a bio", __func__);
		return -ENODEV;
	}
	/*
	 * Extra reference so end_test_bio's bio_put() does not free the bio
	 * while it is still attached to the request.
	 * NOTE(review): on the error paths below only one bio_put() is done,
	 * which appears to leak this extra reference — verify against the
	 * bio lifetime rules of this kernel version.
	 */
	bio_get(bio);
	bio->bi_end_io = end_test_bio;

	switch (req_unique) {
	case REQ_UNIQUE_FLUSH:
		bio->bi_rw = WRITE_FLUSH;
		break;
	case REQ_UNIQUE_DISCARD:
		bio->bi_rw = REQ_WRITE | REQ_DISCARD;
		bio->bi_size = nr_sects << 9;
		bio->bi_sector = start_sec;
		break;
	case REQ_UNIQUE_SANITIZE:
		bio->bi_rw = REQ_WRITE | REQ_SANITIZE;
		break;
	default:
		test_pr_err("%s: Invalid request type %d", __func__,
			    req_unique);
		bio_put(bio);
		return -ENODEV;
	}

	rw_flags = bio_data_dir(bio);
	if (bio->bi_rw & REQ_SYNC)
		rw_flags |= REQ_SYNC;

	rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
	if (!rq) {
		test_pr_err("%s: Failed to allocate a request", __func__);
		bio_put(bio);
		return -ENODEV;
	}

	init_request_from_bio(rq, bio);
	if (end_req_io)
		rq->end_io = end_req_io;
	else
		rq->end_io = end_test_req;

	test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
	if (!test_rq) {
		test_pr_err("%s: Failed to allocate a test request", __func__);
		bio_put(bio);
		blk_put_request(rq);
		return -ENODEV;
	}
	test_rq->req_completed = false;
	test_rq->req_result = -EINVAL;
	test_rq->rq = rq;
	test_rq->is_err_expected = is_err_expcted;
	/* Link back from the block request to the test bookkeeping struct */
	rq->elv.priv[0] = (void *)test_rq;
	test_rq->req_id = ptd->unique_next_req_id++;

	test_pr_debug(
		"%s: added request %d to the test requests list, type = %d",
		__func__, test_rq->req_id, req_unique);

	/* test_queue and test_count are protected by the request queue lock */
	spin_lock_irq(ptd->req_q->queue_lock);
	list_add_tail(&test_rq->queuelist, &ptd->test_queue);
	ptd->test_count++;
	spin_unlock_irq(ptd->req_q->queue_lock);

	return 0;
}
EXPORT_SYMBOL(test_iosched_add_unique_test_req);
244
245/*
246 * Get a pattern to be filled in the request data buffer.
247 * If the pattern used is (-1) the buffer will be filled with sequential
248 * numbers
249 */
250static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
251{
252 int i = 0;
253 int num_of_dwords = num_bytes/sizeof(int);
254
255 if (pattern == TEST_NO_PATTERN)
256 return;
257
258 /* num_bytes should be aligned to sizeof(int) */
259 BUG_ON((num_bytes % sizeof(int)) != 0);
260
261 if (pattern == TEST_PATTERN_SEQUENTIAL) {
262 for (i = 0; i < num_of_dwords; i++)
263 buf[i] = i;
264 } else {
265 for (i = 0; i < num_of_dwords; i++)
266 buf[i] = pattern;
267 }
268}
269
270/**
Lee Susman1199b4c2012-12-19 14:19:30 +0200271 * test_iosched_create_test_req() - Create a read/write request.
Maya Erez60181552012-06-27 11:25:26 +0300272 * @is_err_expcted: A flag to indicate if this request
273 * should succeed or not
274 * @direction: READ/WRITE
275 * @start_sec: start address of the first bio
276 * @num_bios: number of BIOs to be allocated for the
277 * request
278 * @pattern: A pattern, to be written into the write
279 * requests data buffer. In case of READ
280 * request, the given pattern is kept as
281 * the expected pattern. The expected
282 * pattern will be compared in the test
283 * check result function. If no comparisson
284 * is required, set pattern to
285 * TEST_NO_PATTERN.
286 * @end_req_io: specific completion callback. When not
287 * set,the default callback will be used
288 *
289 * This function allocates the test request and the block
290 * request and calls blk_rq_map_kern which allocates the
291 * required BIO. The allocated test request and the block
292 * request memory is freed at the end of the test and the
293 * allocated BIO memory is freed by end_test_bio.
294 */
Lee Susman1199b4c2012-12-19 14:19:30 +0200295struct test_request *test_iosched_create_test_req(int is_err_expcted,
Maya Erez60181552012-06-27 11:25:26 +0300296 int direction, int start_sec,
297 int num_bios, int pattern, rq_end_io_fn *end_req_io)
298{
Lee Susman1199b4c2012-12-19 14:19:30 +0200299 struct request *rq;
300 struct test_request *test_rq;
301 int rw_flags, buf_size;
302 int ret = 0, i;
Maya Erez60181552012-06-27 11:25:26 +0300303 unsigned int *bio_ptr = NULL;
304 struct bio *bio = NULL;
305
306 if (!ptd)
Lee Susman1199b4c2012-12-19 14:19:30 +0200307 return NULL;
Maya Erez60181552012-06-27 11:25:26 +0300308
309 rw_flags = direction;
310
311 rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
312 if (!rq) {
313 test_pr_err("%s: Failed to allocate a request", __func__);
Lee Susman1199b4c2012-12-19 14:19:30 +0200314 return NULL;
Maya Erez60181552012-06-27 11:25:26 +0300315 }
316
317 test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
318 if (!test_rq) {
319 test_pr_err("%s: Failed to allocate test request", __func__);
320 blk_put_request(rq);
Lee Susman1199b4c2012-12-19 14:19:30 +0200321 return NULL;
Maya Erez60181552012-06-27 11:25:26 +0300322 }
323
324 buf_size = sizeof(unsigned int) * BIO_U32_SIZE * num_bios;
325 test_rq->bios_buffer = kzalloc(buf_size, GFP_KERNEL);
326 if (!test_rq->bios_buffer) {
327 test_pr_err("%s: Failed to allocate the data buf", __func__);
328 goto err;
329 }
330 test_rq->buf_size = buf_size;
331
332 if (direction == WRITE)
333 fill_buf_with_pattern(test_rq->bios_buffer,
334 buf_size, pattern);
335 test_rq->wr_rd_data_pattern = pattern;
336
337 bio_ptr = test_rq->bios_buffer;
338 for (i = 0; i < num_bios; ++i) {
339 ret = blk_rq_map_kern(ptd->req_q, rq,
340 (void *)bio_ptr,
341 sizeof(unsigned int)*BIO_U32_SIZE,
342 GFP_KERNEL);
343 if (ret) {
344 test_pr_err("%s: blk_rq_map_kern returned error %d",
345 __func__, ret);
346 goto err;
347 }
348 bio_ptr += BIO_U32_SIZE;
349 }
350
351 if (end_req_io)
352 rq->end_io = end_req_io;
353 else
354 rq->end_io = end_test_req;
355 rq->__sector = start_sec;
356 rq->cmd_type |= REQ_TYPE_FS;
Lee Susman70160bb2013-01-06 10:57:30 +0200357 rq->cmd_flags |= REQ_SORTED;
Maya Erez60181552012-06-27 11:25:26 +0300358
359 if (rq->bio) {
360 rq->bio->bi_sector = start_sec;
361 rq->bio->bi_end_io = end_test_bio;
362 bio = rq->bio;
363 while ((bio = bio->bi_next) != NULL)
364 bio->bi_end_io = end_test_bio;
365 }
366
367 ptd->num_of_write_bios += num_bios;
368 test_rq->req_id = ptd->wr_rd_next_req_id++;
369
Maya Erezdb8cbfe2012-07-15 13:09:08 +0300370 test_rq->req_completed = false;
371 test_rq->req_result = -EINVAL;
Maya Erez60181552012-06-27 11:25:26 +0300372 test_rq->rq = rq;
Dolev Raviv50032382013-01-09 12:00:02 +0200373 if (ptd->test_info.get_rq_disk_fn)
374 test_rq->rq->rq_disk = ptd->test_info.get_rq_disk_fn();
Maya Erez60181552012-06-27 11:25:26 +0300375 test_rq->is_err_expected = is_err_expcted;
376 rq->elv.priv[0] = (void *)test_rq;
377
Lee Susman1199b4c2012-12-19 14:19:30 +0200378 test_pr_debug("%s: created test request %d, buf_size=%d",
379 __func__, test_rq->req_id, buf_size);
Maya Erez60181552012-06-27 11:25:26 +0300380
Lee Susman1199b4c2012-12-19 14:19:30 +0200381 return test_rq;
Maya Erez60181552012-06-27 11:25:26 +0300382err:
383 blk_put_request(rq);
384 kfree(test_rq->bios_buffer);
Lee Susman1199b4c2012-12-19 14:19:30 +0200385 return NULL;
386}
387EXPORT_SYMBOL(test_iosched_create_test_req);
388
389
390/**
391 * test_iosched_add_wr_rd_test_req() - Create and queue a
392 * read/write request.
393 * @is_err_expcted: A flag to indicate if this request
394 * should succeed or not
395 * @direction: READ/WRITE
396 * @start_sec: start address of the first bio
397 * @num_bios: number of BIOs to be allocated for the
398 * request
399 * @pattern: A pattern, to be written into the write
400 * requests data buffer. In case of READ
401 * request, the given pattern is kept as
402 * the expected pattern. The expected
403 * pattern will be compared in the test
404 * check result function. If no comparisson
405 * is required, set pattern to
406 * TEST_NO_PATTERN.
407 * @end_req_io: specific completion callback. When not
408 * set,the default callback will be used
409 *
410 * This function allocates the test request and the block
411 * request and calls blk_rq_map_kern which allocates the
412 * required BIO. Upon success the new request is added to the
413 * test_queue. The allocated test request and the block request
414 * memory is freed at the end of the test and the allocated BIO
415 * memory is freed by end_test_bio.
416 */
417int test_iosched_add_wr_rd_test_req(int is_err_expcted,
418 int direction, int start_sec,
419 int num_bios, int pattern, rq_end_io_fn *end_req_io)
420{
421 struct test_request *test_rq = NULL;
422
423 test_rq = test_iosched_create_test_req(is_err_expcted,
424 direction, start_sec,
425 num_bios, pattern, end_req_io);
426 if (test_rq) {
427 spin_lock_irq(ptd->req_q->queue_lock);
428 list_add_tail(&test_rq->queuelist, &ptd->test_queue);
429 ptd->test_count++;
430 spin_unlock_irq(ptd->req_q->queue_lock);
431 return 0;
432 }
Maya Erez60181552012-06-27 11:25:26 +0300433 return -ENODEV;
434}
435EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
436
437/* Converts the testcase number into a string */
438static char *get_test_case_str(struct test_data *td)
439{
440 if (td->test_info.get_test_case_str_fn)
441 return td->test_info.get_test_case_str_fn(td);
442
443 return "Unknown testcase";
444}
445
446/*
447 * Verify that the test request data buffer includes the expected
448 * pattern
449 */
450static int compare_buffer_to_pattern(struct test_request *test_rq)
451{
452 int i = 0;
453 int num_of_dwords = test_rq->buf_size/sizeof(int);
454
455 /* num_bytes should be aligned to sizeof(int) */
456 BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
457 BUG_ON(test_rq->bios_buffer == NULL);
458
459 if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
460 return 0;
461
462 if (test_rq->wr_rd_data_pattern == TEST_PATTERN_SEQUENTIAL) {
463 for (i = 0; i < num_of_dwords; i++) {
464 if (test_rq->bios_buffer[i] != i) {
465 test_pr_err(
466 "%s: wrong pattern 0x%x in index %d",
467 __func__, test_rq->bios_buffer[i], i);
468 return -EINVAL;
469 }
470 }
471 } else {
472 for (i = 0; i < num_of_dwords; i++) {
473 if (test_rq->bios_buffer[i] !=
474 test_rq->wr_rd_data_pattern) {
475 test_pr_err(
476 "%s: wrong pattern 0x%x in index %d",
477 __func__, test_rq->bios_buffer[i], i);
478 return -EINVAL;
479 }
480 }
481 }
482
483 return 0;
484}
485
486/*
487 * Determine if the test passed or failed.
488 * The function checks the test request completion value and calls
489 * check_testcase_result for result checking that are specific
490 * to a test case.
491 */
492static int check_test_result(struct test_data *td)
493{
494 struct test_request *test_rq;
Maya Erez60181552012-06-27 11:25:26 +0300495 int res = 0;
496 static int run;
497
Lee Susman1199b4c2012-12-19 14:19:30 +0200498 if (!ptd)
499 goto err;
500
501 list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist) {
502 if (!test_rq->rq) {
503 test_pr_info("%s: req_id %d is contains empty req",
504 __func__, test_rq->req_id);
505 continue;
506 }
Maya Erez60181552012-06-27 11:25:26 +0300507 if (!test_rq->req_completed) {
508 test_pr_err("%s: rq %d not completed", __func__,
509 test_rq->req_id);
510 res = -EINVAL;
511 goto err;
512 }
513
514 if ((test_rq->req_result < 0) && !test_rq->is_err_expected) {
515 test_pr_err(
516 "%s: rq %d completed with err, not as expected",
517 __func__, test_rq->req_id);
518 res = -EINVAL;
519 goto err;
520 }
521 if ((test_rq->req_result == 0) && test_rq->is_err_expected) {
522 test_pr_err("%s: rq %d succeeded, not as expected",
523 __func__, test_rq->req_id);
524 res = -EINVAL;
525 goto err;
526 }
527 if (rq_data_dir(test_rq->rq) == READ) {
528 res = compare_buffer_to_pattern(test_rq);
529 if (res) {
530 test_pr_err("%s: read pattern not as expected",
531 __func__);
532 res = -EINVAL;
533 goto err;
534 }
535 }
536 }
537
538 if (td->test_info.check_test_result_fn) {
539 res = td->test_info.check_test_result_fn(td);
540 if (res)
541 goto err;
542 }
543
544 test_pr_info("%s: %s, run# %03d, PASSED",
545 __func__, get_test_case_str(td), ++run);
546 td->test_result = TEST_PASSED;
547
548 return 0;
549err:
550 test_pr_err("%s: %s, run# %03d, FAILED",
551 __func__, get_test_case_str(td), ++run);
552 td->test_result = TEST_FAILED;
553 return res;
554}
555
556/* Create and queue the required requests according to the test case */
557static int prepare_test(struct test_data *td)
558{
559 int ret = 0;
560
561 if (td->test_info.prepare_test_fn) {
562 ret = td->test_info.prepare_test_fn(td);
563 return ret;
564 }
565
566 return 0;
567}
568
569/* Run the test */
570static int run_test(struct test_data *td)
571{
572 int ret = 0;
573
574 if (td->test_info.run_test_fn) {
575 ret = td->test_info.run_test_fn(td);
576 return ret;
577 }
578
Dolev Raviv17e5e1e2013-02-12 17:11:17 +0200579 blk_run_queue(td->req_q);
Maya Erez60181552012-06-27 11:25:26 +0300580
581 return 0;
582}
583
/*
 * free_test_queue() - Free all allocated test requests in the given test_queue:
 * free their requests and BIOs buffer
 * @test_queue		the test queue to be freed
 */
static void free_test_queue(struct list_head *test_queue)
{
        struct test_request *test_rq;
        struct bio *bio;

        while (!list_empty(test_queue)) {
                test_rq = list_entry(test_queue->next, struct test_request,
                                queuelist);

                list_del_init(&test_rq->queuelist);
                /*
                 * If the request was not completed we need to free its BIOs
                 * and remove it from the packed list
                 */
                if (!test_rq->req_completed) {
                        test_pr_info(
                                "%s: Freeing memory of an uncompleted request",
                                __func__);
                        list_del_init(&test_rq->rq->queuelist);
                        /* Detach and drop every bio still linked to the rq */
                        while ((bio = test_rq->rq->bio) != NULL) {
                                test_rq->rq->bio = bio->bi_next;
                                bio_put(bio);
                        }
                }
                blk_put_request(test_rq->rq);
                kfree(test_rq->bios_buffer);
                kfree(test_rq);
        }
}
618
619/*
Lee Susman1199b4c2012-12-19 14:19:30 +0200620 * free_test_requests() - Free all allocated test requests in
621 * all test queues in given test_data.
622 * @td The test_data struct whos test requests will be
623 * freed.
624 */
625static void free_test_requests(struct test_data *td)
626{
627 if (!td)
628 return;
629
630 if (td->urgent_count) {
631 free_test_queue(&td->urgent_queue);
632 td->urgent_count = 0;
633 }
634 if (td->test_count) {
635 free_test_queue(&td->test_queue);
636 td->test_count = 0;
637 }
638 if (td->dispatched_count) {
639 free_test_queue(&td->dispatched_queue);
640 td->dispatched_count = 0;
641 }
642 if (td->reinsert_count) {
643 free_test_queue(&td->reinsert_queue);
644 td->reinsert_count = 0;
645 }
646}
647
648/*
649 * post_test() - Do post test operations. Free the allocated
650 * test requests, their requests and BIOs buffer.
651 * @td The test_data struct for the test that has
652 * ended.
Maya Erez60181552012-06-27 11:25:26 +0300653 */
654static int post_test(struct test_data *td)
655{
656 int ret = 0;
657
658 if (td->test_info.post_test_fn)
659 ret = td->test_info.post_test_fn(td);
660
661 ptd->test_info.testcase = 0;
662 ptd->test_state = TEST_IDLE;
663
664 free_test_requests(td);
665
666 return ret;
667}
668
669/*
670 * The timer verifies that the test will be completed even if we don't get
671 * the completion callback for all the requests.
672 */
673static void test_timeout_handler(unsigned long data)
674{
675 struct test_data *td = (struct test_data *)data;
676
677 test_pr_info("%s: TIMEOUT timer expired", __func__);
678 td->test_state = TEST_COMPLETED;
679 wake_up(&td->wait_q);
680 return;
681}
682
683static unsigned int get_timeout_msec(struct test_data *td)
684{
685 if (td->test_info.timeout_msec)
686 return td->test_info.timeout_msec;
687 else
688 return TIMEOUT_TIMER_MS;
689}
690
/**
 * test_iosched_start_test() - Prepares and runs the test.
 * The members test_duration and test_byte_count of the input
 * parameter t_info are modified by this function.
 * @t_info:	the current test testcase and callbacks
 *		functions
 *
 * The function also checks the test result upon test completion.
 * A round spoiled by FS write requests (ignore_round) is retried,
 * up to TEST_MAX_TESTCASE_ROUNDS times.
 * Returns 0 when the test PASSED, a negative error otherwise.
 */
int test_iosched_start_test(struct test_info *t_info)
{
	int ret = 0;
	unsigned timeout_msec;
	int counter = 0;
	char *test_name = NULL;

	if (!ptd)
		return -ENODEV;

	if (!t_info) {
		ptd->test_result = TEST_FAILED;
		return -EINVAL;
	}

	do {
		if (ptd->ignore_round)
			/*
			 * We ignored the last run due to FS write requests.
			 * Sleep to allow those requests to be issued
			 */
			msleep(2000);

		spin_lock(&ptd->lock);

		/* Only one test may run at a time */
		if (ptd->test_state != TEST_IDLE) {
			test_pr_info(
				"%s: Another test is running, try again later",
				__func__);
			spin_unlock(&ptd->lock);
			return -EBUSY;
		}

		if (ptd->start_sector == 0) {
			test_pr_err("%s: Invalid start sector", __func__);
			ptd->test_result = TEST_FAILED;
			spin_unlock(&ptd->lock);
			return -EINVAL;
		}

		memcpy(&ptd->test_info, t_info, sizeof(struct test_info));

		/* Reset per-round state before entering TEST_RUNNING */
		ptd->test_result = TEST_NO_RESULT;
		ptd->num_of_write_bios = 0;

		ptd->unique_next_req_id = UNIQUE_START_REQ_ID;
		ptd->wr_rd_next_req_id = WR_RD_START_REQ_ID;

		ptd->ignore_round = false;
		ptd->fs_wr_reqs_during_test = false;

		ptd->test_state = TEST_RUNNING;

		spin_unlock(&ptd->lock);
		/*
		 * Give an already dispatched request from
		 * FS a chance to complete
		 */
		msleep(2000);

		/* Arm the safety-net timer (see test_timeout_handler) */
		timeout_msec = get_timeout_msec(ptd);
		mod_timer(&ptd->timeout_timer, jiffies +
			msecs_to_jiffies(timeout_msec));

		if (ptd->test_info.get_test_case_str_fn)
			test_name = ptd->test_info.get_test_case_str_fn(ptd);
		else
			test_name = "Unknown testcase";
		test_pr_info("%s: Starting test %s", __func__, test_name);

		ret = prepare_test(ptd);
		if (ret) {
			test_pr_err("%s: failed to prepare the test\n",
				__func__);
			goto error;
		}

		/* test_duration holds the start time until completion */
		ptd->test_info.test_duration = ktime_get();
		ret = run_test(ptd);
		if (ret) {
			test_pr_err("%s: failed to run the test\n", __func__);
			goto error;
		}

		test_pr_info("%s: Waiting for the test completion", __func__);

		wait_event(ptd->wait_q, ptd->test_state == TEST_COMPLETED);
		del_timer_sync(&ptd->timeout_timer);

		/* Report duration/byte-count etc. back to the caller */
		memcpy(t_info, &ptd->test_info, sizeof(struct test_info));

		ret = check_test_result(ptd);
		if (ret) {
			test_pr_err("%s: check_test_result failed\n",
				__func__);
			goto error;
		}

		ret = post_test(ptd);
		if (ret) {
			test_pr_err("%s: post_test failed\n", __func__);
			goto error;
		}

		/*
		 * Wakeup the queue thread to fetch FS requests that might got
		 * postponed due to the test
		 */
		blk_run_queue(ptd->req_q);

		if (ptd->ignore_round)
			test_pr_info(
			"%s: Round canceled (Got wr reqs in the middle)",
			__func__);

		if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
			test_pr_info("%s: Too many rounds, did not succeed...",
				     __func__);
			ptd->test_result = TEST_FAILED;
		}

	} while ((ptd->ignore_round) && (counter < TEST_MAX_TESTCASE_ROUNDS));

	if (ptd->test_result == TEST_PASSED)
		return 0;
	else
		return -EINVAL;

error:
	post_test(ptd);
	ptd->test_result = TEST_FAILED;
	return ret;
}
EXPORT_SYMBOL(test_iosched_start_test);
834
/**
 * test_iosched_register() - register a block device test
 * utility.
 * @bdt:	the block device test type to register
 */
void test_iosched_register(struct blk_dev_test_type *bdt)
{
	/* The global registration list is protected by its own spinlock */
	spin_lock(&blk_dev_test_list_lock);
	list_add_tail(&bdt->list, &blk_dev_test_list);
	spin_unlock(&blk_dev_test_list_lock);
}
EXPORT_SYMBOL_GPL(test_iosched_register);
847
/**
 * test_iosched_unregister() - unregister a block device test
 * utility.
 * @bdt:	the block device test type to unregister
 */
void test_iosched_unregister(struct blk_dev_test_type *bdt)
{
	spin_lock(&blk_dev_test_list_lock);
	list_del_init(&bdt->list);
	spin_unlock(&blk_dev_test_list_lock);
}
EXPORT_SYMBOL_GPL(test_iosched_unregister);
860
861/**
862 * test_iosched_set_test_result() - Set the test
863 * result(PASS/FAIL)
864 * @test_result: the test result
865 */
866void test_iosched_set_test_result(int test_result)
867{
868 if (!ptd)
869 return;
870
871 ptd->test_result = test_result;
872}
873EXPORT_SYMBOL(test_iosched_set_test_result);
874
875
876/**
877 * test_iosched_set_ignore_round() - Set the ignore_round flag
878 * @ignore_round: A flag to indicate if this test round
879 * should be ignored and re-run
880 */
881void test_iosched_set_ignore_round(bool ignore_round)
882{
883 if (!ptd)
884 return;
885
886 ptd->ignore_round = ignore_round;
887}
888EXPORT_SYMBOL(test_iosched_set_ignore_round);
889
890/**
891 * test_iosched_get_debugfs_tests_root() - returns the root
892 * debugfs directory for the test_iosched tests
893 */
894struct dentry *test_iosched_get_debugfs_tests_root(void)
895{
896 if (!ptd)
897 return NULL;
898
899 return ptd->debug.debug_tests_root;
900}
901EXPORT_SYMBOL(test_iosched_get_debugfs_tests_root);
902
903/**
904 * test_iosched_get_debugfs_utils_root() - returns the root
905 * debugfs directory for the test_iosched utils
906 */
907struct dentry *test_iosched_get_debugfs_utils_root(void)
908{
909 if (!ptd)
910 return NULL;
911
912 return ptd->debug.debug_utils_root;
913}
914EXPORT_SYMBOL(test_iosched_get_debugfs_utils_root);
915
/*
 * Build the debugfs hierarchy:
 *   test-iosched/
 *     tests/        (testcase trigger files, populated by test utilities)
 *     utils/
 *       test_result
 *       start_sector
 * Returns 0 on success, -ENOENT on any creation failure (after removing
 * whatever was already created).
 */
static int test_debugfs_init(struct test_data *td)
{
	td->debug.debug_root = debugfs_create_dir("test-iosched", NULL);
	if (!td->debug.debug_root)
		return -ENOENT;

	td->debug.debug_tests_root = debugfs_create_dir("tests",
							td->debug.debug_root);
	if (!td->debug.debug_tests_root)
		goto err;

	td->debug.debug_utils_root = debugfs_create_dir("utils",
							td->debug.debug_root);
	if (!td->debug.debug_utils_root)
		goto err;

	td->debug.debug_test_result = debugfs_create_u32(
					"test_result",
					S_IRUGO | S_IWUGO,
					td->debug.debug_utils_root,
					&td->test_result);
	if (!td->debug.debug_test_result)
		goto err;

	td->debug.start_sector = debugfs_create_u32(
					"start_sector",
					S_IRUGO | S_IWUGO,
					td->debug.debug_utils_root,
					&td->start_sector);
	if (!td->debug.start_sector)
		goto err;

	return 0;

err:
	/* Removes the root and, recursively, everything created above */
	debugfs_remove_recursive(td->debug.debug_root);
	return -ENOENT;
}
954
/* Tear down the whole debugfs tree created by test_debugfs_init() */
static void test_debugfs_cleanup(struct test_data *td)
{
	debugfs_remove_recursive(td->debug.debug_root);
}
959
960static void print_req(struct request *req)
961{
962 struct bio *bio;
963 struct test_request *test_rq;
964
965 if (!req)
966 return;
967
968 test_rq = (struct test_request *)req->elv.priv[0];
969
970 if (test_rq) {
971 test_pr_debug("%s: Dispatch request %d: __sector=0x%lx",
972 __func__, test_rq->req_id, (unsigned long)req->__sector);
973 test_pr_debug("%s: nr_phys_segments=%d, num_of_sectors=%d",
974 __func__, req->nr_phys_segments, blk_rq_sectors(req));
975 bio = req->bio;
976 test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
977 __func__, bio->bi_size,
978 (unsigned long)bio->bi_sector);
979 while ((bio = bio->bi_next) != NULL) {
980 test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
981 __func__, bio->bi_size,
982 (unsigned long)bio->bi_sector);
983 }
984 }
985}
986
/*
 * Elevator merge hook: @next has been merged into @rq, so drop it from
 * whichever list it is on.
 * NOTE(review): the test_request bookkeeping attached to @next (if any)
 * is not released here — presumably reclaimed later by free_test_queue();
 * verify.
 */
static void test_merged_requests(struct request_queue *q,
			 struct request *rq, struct request *next)
{
	list_del_init(&next->queuelist);
}
/*
 * test_dispatch_from(): Dispatch request from @queue to the @dispatched_queue.
 * Also update the dispatched_count counter.
 * @count points at the counter paired with @queue (test/urgent/reinsert
 * count) and is decremented under ptd->lock together with the list move.
 * Returns 1 when a request was dispatched, 0 otherwise.
 */
static int test_dispatch_from(struct request_queue *q,
		struct list_head *queue, unsigned int *count)
{
	struct test_request *test_rq;
	struct request *rq;
	int ret = 0;

	if (!ptd)
		goto err;

	spin_lock_irq(&ptd->lock);
	if (!list_empty(queue)) {
		test_rq = list_entry(queue->next, struct test_request,
				queuelist);
		rq = test_rq->rq;
		if (!rq) {
			pr_err("%s: null request,return", __func__);
			spin_unlock_irq(&ptd->lock);
			goto err;
		}
		/* Move under the lock, then dispatch outside it */
		list_move_tail(&test_rq->queuelist, &ptd->dispatched_queue);
		ptd->dispatched_count++;
		(*count)--;
		spin_unlock_irq(&ptd->lock);

		print_req(rq);
		elv_dispatch_sort(q, rq);
		ptd->test_info.test_byte_count += test_rq->buf_size;
		ret = 1;
		goto err;
	}
	spin_unlock_irq(&ptd->lock);

err:
	return ret;
}
Maya Erez60181552012-06-27 11:25:26 +03001032
/*
 * Dispatch a test request in case there is a running test. Otherwise,
 * dispatch a request that was queued by the FS to keep the card functional.
 */
1037static int test_dispatch_requests(struct request_queue *q, int force)
1038{
1039 struct test_data *td = q->elevator->elevator_data;
1040 struct request *rq = NULL;
Lee Susman1199b4c2012-12-19 14:19:30 +02001041 int ret = 0;
Maya Erez60181552012-06-27 11:25:26 +03001042
1043 switch (td->test_state) {
1044 case TEST_IDLE:
1045 if (!list_empty(&td->queue)) {
1046 rq = list_entry(td->queue.next, struct request,
1047 queuelist);
1048 list_del_init(&rq->queuelist);
1049 elv_dispatch_sort(q, rq);
Lee Susman1199b4c2012-12-19 14:19:30 +02001050 ret = 1;
1051 goto exit;
Maya Erez60181552012-06-27 11:25:26 +03001052 }
1053 break;
1054 case TEST_RUNNING:
Lee Susman1199b4c2012-12-19 14:19:30 +02001055 if (test_dispatch_from(q, &td->urgent_queue,
1056 &td->urgent_count)) {
1057 test_pr_debug("%s: Dispatched from urgent_count=%d",
1058 __func__, ptd->urgent_count);
1059 ret = 1;
1060 goto exit;
1061 }
1062 if (test_dispatch_from(q, &td->reinsert_queue,
1063 &td->reinsert_count)) {
1064 test_pr_debug("%s: Dispatched from reinsert_count=%d",
1065 __func__, ptd->reinsert_count);
1066 ret = 1;
1067 goto exit;
1068 }
1069 if (test_dispatch_from(q, &td->test_queue, &td->test_count)) {
1070 test_pr_debug("%s: Dispatched from test_count=%d",
1071 __func__, ptd->test_count);
1072 ret = 1;
1073 goto exit;
Maya Erez60181552012-06-27 11:25:26 +03001074 }
1075 break;
1076 case TEST_COMPLETED:
1077 default:
Lee Susman1199b4c2012-12-19 14:19:30 +02001078 break;
Maya Erez60181552012-06-27 11:25:26 +03001079 }
1080
Lee Susman1199b4c2012-12-19 14:19:30 +02001081exit:
1082 return ret;
Maya Erez60181552012-06-27 11:25:26 +03001083}
1084
1085static void test_add_request(struct request_queue *q, struct request *rq)
1086{
1087 struct test_data *td = q->elevator->elevator_data;
1088
1089 list_add_tail(&rq->queuelist, &td->queue);
1090
1091 /*
1092 * The write requests can be followed by a FLUSH request that might
1093 * cause unexpected results of the test.
1094 */
1095 if ((rq_data_dir(rq) == WRITE) && (td->test_state == TEST_RUNNING)) {
1096 test_pr_debug("%s: got WRITE req in the middle of the test",
1097 __func__);
1098 td->fs_wr_reqs_during_test = true;
1099 }
1100}
1101
1102static struct request *
1103test_former_request(struct request_queue *q, struct request *rq)
1104{
1105 struct test_data *td = q->elevator->elevator_data;
1106
1107 if (rq->queuelist.prev == &td->queue)
1108 return NULL;
1109 return list_entry(rq->queuelist.prev, struct request, queuelist);
1110}
1111
1112static struct request *
1113test_latter_request(struct request_queue *q, struct request *rq)
1114{
1115 struct test_data *td = q->elevator->elevator_data;
1116
1117 if (rq->queuelist.next == &td->queue)
1118 return NULL;
1119 return list_entry(rq->queuelist.next, struct request, queuelist);
1120}
1121
1122static void *test_init_queue(struct request_queue *q)
1123{
1124 struct blk_dev_test_type *__bdt;
1125
1126 ptd = kmalloc_node(sizeof(struct test_data), GFP_KERNEL,
1127 q->node);
1128 if (!ptd) {
1129 test_pr_err("%s: failed to allocate test data", __func__);
1130 return NULL;
1131 }
1132 memset((void *)ptd, 0, sizeof(struct test_data));
1133 INIT_LIST_HEAD(&ptd->queue);
1134 INIT_LIST_HEAD(&ptd->test_queue);
Lee Susman1199b4c2012-12-19 14:19:30 +02001135 INIT_LIST_HEAD(&ptd->dispatched_queue);
1136 INIT_LIST_HEAD(&ptd->reinsert_queue);
1137 INIT_LIST_HEAD(&ptd->urgent_queue);
Maya Erez60181552012-06-27 11:25:26 +03001138 init_waitqueue_head(&ptd->wait_q);
1139 ptd->req_q = q;
1140
1141 setup_timer(&ptd->timeout_timer, test_timeout_handler,
1142 (unsigned long)ptd);
1143
1144 spin_lock_init(&ptd->lock);
1145
1146 if (test_debugfs_init(ptd)) {
1147 test_pr_err("%s: Failed to create debugfs files", __func__);
1148 return NULL;
1149 }
1150
1151 list_for_each_entry(__bdt, &blk_dev_test_list, list)
1152 __bdt->init_fn();
1153
1154 return ptd;
1155}
1156
1157static void test_exit_queue(struct elevator_queue *e)
1158{
1159 struct test_data *td = e->elevator_data;
1160 struct blk_dev_test_type *__bdt;
1161
1162 BUG_ON(!list_empty(&td->queue));
1163
1164 list_for_each_entry(__bdt, &blk_dev_test_list, list)
1165 __bdt->exit_fn();
1166
1167 test_debugfs_cleanup(td);
1168
1169 kfree(td);
1170}
1171
/**
 * test_get_test_data() - Returns a pointer to the global test_data
 * struct which keeps the current test data. May be NULL when no
 * test-iosched queue has been initialized.
 */
struct test_data *test_get_test_data(void)
{
	return ptd;
}
EXPORT_SYMBOL(test_get_test_data);
1182
1183static bool test_urgent_pending(struct request_queue *q)
1184{
1185 return !list_empty(&ptd->urgent_queue);
1186}
1187
/**
 * test_iosched_add_urgent_req() - Add an urgent test_request.
 * First mark the request as urgent, then add it to the
 * urgent_queue test queue.
 * @test_rq: pointer to the urgent test_request to be
 *	added.
 *
 */
void test_iosched_add_urgent_req(struct test_request *test_rq)
{
	spin_lock_irq(&ptd->lock);
	/* Flag the underlying block request so the driver sees it as urgent */
	test_rq->rq->cmd_flags |= REQ_URGENT;
	list_add_tail(&test_rq->queuelist, &ptd->urgent_queue);
	ptd->urgent_count++;
	spin_unlock_irq(&ptd->lock);
}
EXPORT_SYMBOL(test_iosched_add_urgent_req);
1205
/**
 * test_reinsert_req() - Moves the @rq request from
 *	@dispatched_queue into @reinsert_queue.
 *	The @rq must be in @dispatched_queue
 * @q:		request queue
 * @rq:		request to be inserted
 *
 * Returns 0 on success, -EINVAL when @rq is not found on the
 * dispatched queue (or no test context exists).
 */
static int test_reinsert_req(struct request_queue *q,
			     struct request *rq)
{
	struct test_request *test_rq;
	int ret = -EINVAL;

	if (!ptd)
		goto exit;

	if (list_empty(&ptd->dispatched_queue)) {
			test_pr_err("%s: dispatched_queue is empty", __func__);
			goto exit;
	}

	/* NOTE(review): dispatched_queue is modified elsewhere under
	 * ptd->lock (see test_dispatch_from()), but this walk/move takes no
	 * lock - confirm the caller's queue lock serializes both paths. */
	list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist) {
		if (test_rq->rq == rq) {
			list_move(&test_rq->queuelist, &ptd->reinsert_queue);
			ptd->dispatched_count--;
			ptd->reinsert_count++;
			ret = 0;
			break;
		}
	}

exit:
	return ret;
}
1242
/* Elevator ops table wiring the test scheduler into the block layer */
static struct elevator_type elevator_test_iosched = {

	.ops = {
		.elevator_merge_req_fn = test_merged_requests,
		.elevator_dispatch_fn = test_dispatch_requests,
		.elevator_add_req_fn = test_add_request,
		.elevator_former_req_fn = test_former_request,
		.elevator_latter_req_fn = test_latter_request,
		.elevator_init_fn = test_init_queue,
		.elevator_exit_fn = test_exit_queue,
		.elevator_is_urgent_fn = test_urgent_pending,
		.elevator_reinsert_req_fn = test_reinsert_req,
	},
	.elevator_name = "test-iosched",
	.elevator_owner = THIS_MODULE,
};
1259
/* Module init: register the test-iosched elevator with the block layer. */
static int __init test_init(void)
{
	/* NOTE(review): elv_register()'s status is ignored; if it returns
	 * int in this tree, its result should be propagated - confirm. */
	elv_register(&elevator_test_iosched);

	return 0;
}
1266
/* Module exit: unregister the test-iosched elevator. */
static void __exit test_exit(void)
{
	elv_unregister(&elevator_test_iosched);
}
1271
1272module_init(test_init);
1273module_exit(test_exit);
1274
1275MODULE_LICENSE("GPL v2");
1276MODULE_DESCRIPTION("Test IO scheduler");