blob: 0a033dcfbd1f0c260349cd3cdbb2217191a0d1b2 [file] [log] [blame]
Maya Erez60181552012-06-27 11:25:26 +03001/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
 * The test scheduler allows testing the block device by dispatching
 * specific requests according to the test case and declaring PASS/FAIL
 * according to the requests' completion error codes.
15 * Each test is exposed via debugfs and can be triggered by writing to
16 * the debugfs file.
17 *
18 */
19
20/* elevator test iosched */
21#include <linux/blkdev.h>
22#include <linux/elevator.h>
23#include <linux/bio.h>
24#include <linux/module.h>
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/debugfs.h>
28#include <linux/test-iosched.h>
29#include <linux/delay.h>
30#include "blk.h"
31
#define MODULE_NAME "test-iosched"
/* First request id assigned to read/write test requests */
#define WR_RD_START_REQ_ID 1234
/* First request id assigned to unique (FLUSH/DISCARD/SANITIZE) requests */
#define UNIQUE_START_REQ_ID 5678
/* Default test watchdog timeout, in milliseconds */
#define TIMEOUT_TIMER_MS 40000
/* Max number of rounds a testcase is retried after being ignored */
#define TEST_MAX_TESTCASE_ROUNDS 15

/* Logging helpers that prefix every message with the module name */
#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)

/* Protects the list of registered block device test utilities */
static DEFINE_SPINLOCK(blk_dev_test_list_lock);
static LIST_HEAD(blk_dev_test_list);
/* Single global scheduler instance; NOTE(review): presumably only one
 * request queue uses this elevator at a time — verify against users. */
static struct test_data *ptd;
45
46/* Get the request after `test_rq' in the test requests list */
47static struct test_request *
48latter_test_request(struct request_queue *q,
49 struct test_request *test_rq)
50{
51 struct test_data *td = q->elevator->elevator_data;
52
53 if (test_rq->queuelist.next == &td->test_queue)
54 return NULL;
55 return list_entry(test_rq->queuelist.next, struct test_request,
56 queuelist);
57}
58
59/**
60 * test_iosched_get_req_queue() - returns the request queue
61 * served by the scheduler
62 */
63struct request_queue *test_iosched_get_req_queue(void)
64{
65 if (!ptd)
66 return NULL;
67
68 return ptd->req_q;
69}
70EXPORT_SYMBOL(test_iosched_get_req_queue);
71
72/**
73 * test_iosched_mark_test_completion() - Wakeup the debugfs
74 * thread, waiting on the test completion
75 */
76void test_iosched_mark_test_completion(void)
77{
78 if (!ptd)
79 return;
80
81 ptd->test_state = TEST_COMPLETED;
82 wake_up(&ptd->wait_q);
83}
84EXPORT_SYMBOL(test_iosched_mark_test_completion);
85
86/* Check if all the queued test requests were completed */
87static void check_test_completion(void)
88{
89 struct test_request *test_rq;
90 struct request *rq;
91
92 list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
93 rq = test_rq->rq;
94 if (!test_rq->req_completed)
95 return;
96 }
97
98 test_pr_info("%s: Test is completed", __func__);
99
100 test_iosched_mark_test_completion();
101}
102
/*
 * A callback to be called per bio completion.
 * On error, clears BIO_UPTODATE so readers of the bio see the failure,
 * then drops a reference, freeing the bio memory once the last
 * reference is gone.
 */
static void end_test_bio(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);

	bio_put(bio);
}
114
115/*
116 * A callback to be called per request completion.
117 * the request memory is not freed here, will be freed later after the test
118 * results checking.
119 */
120static void end_test_req(struct request *rq, int err)
121{
122 struct test_request *test_rq;
123
124 test_rq = (struct test_request *)rq->elv.priv[0];
125 BUG_ON(!test_rq);
126
127 test_pr_info("%s: request %d completed, err=%d",
128 __func__, test_rq->req_id, err);
129
Maya Erezdb8cbfe2012-07-15 13:09:08 +0300130 test_rq->req_completed = true;
Maya Erez60181552012-06-27 11:25:26 +0300131 test_rq->req_result = err;
132
133 check_test_completion();
134}
135
/**
 * test_iosched_add_unique_test_req() - Create and queue a non
 * read/write request (such as FLUSH/DISCARD/SANITIZE).
 * @is_err_expcted:	A flag to indicate if this request
 *			should succeed or not
 * @req_unique:		The type of request to add
 * @start_sec:		start address of the first bio
 * @nr_sects:		number of sectors in the request
 * @end_req_io:		specific completion callback. When not
 *			set, the default callback will be used
 *
 * Returns 0 on success, -ENODEV on allocation failure, on an invalid
 * request type, or when the scheduler instance does not exist.
 */
int test_iosched_add_unique_test_req(int is_err_expcted,
		enum req_unique_type req_unique,
		int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
{
	struct bio *bio;
	struct request *rq;
	int rw_flags;
	struct test_request *test_rq;

	if (!ptd)
		return -ENODEV;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio) {
		test_pr_err("%s: Failed to allocate a bio", __func__);
		return -ENODEV;
	}
	/*
	 * Extra reference; end_test_bio() drops one on completion.
	 * NOTE(review): the error paths below call bio_put() only once
	 * after this get — verify they do not leak the bio.
	 */
	bio_get(bio);
	bio->bi_end_io = end_test_bio;

	/* Translate the unique request type into the matching bio flags */
	switch (req_unique) {
	case REQ_UNIQUE_FLUSH:
		bio->bi_rw = WRITE_FLUSH;
		break;
	case REQ_UNIQUE_DISCARD:
		bio->bi_rw = REQ_WRITE | REQ_DISCARD;
		bio->bi_size = nr_sects << 9; /* sectors -> bytes */
		bio->bi_sector = start_sec;
		break;
	case REQ_UNIQUE_SANITIZE:
		bio->bi_rw = REQ_WRITE | REQ_SANITIZE;
		break;
	default:
		test_pr_err("%s: Invalid request type %d", __func__,
			    req_unique);
		bio_put(bio);
		return -ENODEV;
	}

	rw_flags = bio_data_dir(bio);
	if (bio->bi_rw & REQ_SYNC)
		rw_flags |= REQ_SYNC;

	rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
	if (!rq) {
		test_pr_err("%s: Failed to allocate a request", __func__);
		bio_put(bio);
		return -ENODEV;
	}

	init_request_from_bio(rq, bio);
	/* Use the caller's completion callback when one was given */
	if (end_req_io)
		rq->end_io = end_req_io;
	else
		rq->end_io = end_test_req;

	test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
	if (!test_rq) {
		test_pr_err("%s: Failed to allocate a test request", __func__);
		bio_put(bio);
		blk_put_request(rq);
		return -ENODEV;
	}
	test_rq->req_completed = false;
	test_rq->req_result = -EINVAL;
	test_rq->rq = rq;
	test_rq->is_err_expected = is_err_expcted;
	/* Stash the test request so the completion callback can find it */
	rq->elv.priv[0] = (void *)test_rq;
	test_rq->req_id = ptd->unique_next_req_id++;

	test_pr_debug(
		"%s: added request %d to the test requests list, type = %d",
		__func__, test_rq->req_id, req_unique);

	list_add_tail(&test_rq->queuelist, &ptd->test_queue);

	return 0;
}
EXPORT_SYMBOL(test_iosched_add_unique_test_req);
226
227/*
228 * Get a pattern to be filled in the request data buffer.
229 * If the pattern used is (-1) the buffer will be filled with sequential
230 * numbers
231 */
232static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
233{
234 int i = 0;
235 int num_of_dwords = num_bytes/sizeof(int);
236
237 if (pattern == TEST_NO_PATTERN)
238 return;
239
240 /* num_bytes should be aligned to sizeof(int) */
241 BUG_ON((num_bytes % sizeof(int)) != 0);
242
243 if (pattern == TEST_PATTERN_SEQUENTIAL) {
244 for (i = 0; i < num_of_dwords; i++)
245 buf[i] = i;
246 } else {
247 for (i = 0; i < num_of_dwords; i++)
248 buf[i] = pattern;
249 }
250}
251
252/**
253 * test_iosched_add_wr_rd_test_req() - Create and queue a
254 * read/write request.
255 * @is_err_expcted: A flag to indicate if this request
256 * should succeed or not
257 * @direction: READ/WRITE
258 * @start_sec: start address of the first bio
259 * @num_bios: number of BIOs to be allocated for the
260 * request
261 * @pattern: A pattern, to be written into the write
262 * requests data buffer. In case of READ
263 * request, the given pattern is kept as
264 * the expected pattern. The expected
265 * pattern will be compared in the test
266 * check result function. If no comparisson
267 * is required, set pattern to
268 * TEST_NO_PATTERN.
269 * @end_req_io: specific completion callback. When not
270 * set,the default callback will be used
271 *
272 * This function allocates the test request and the block
273 * request and calls blk_rq_map_kern which allocates the
274 * required BIO. The allocated test request and the block
275 * request memory is freed at the end of the test and the
276 * allocated BIO memory is freed by end_test_bio.
277 */
278int test_iosched_add_wr_rd_test_req(int is_err_expcted,
279 int direction, int start_sec,
280 int num_bios, int pattern, rq_end_io_fn *end_req_io)
281{
282 struct request *rq = NULL;
283 struct test_request *test_rq = NULL;
284 int rw_flags = 0;
285 int buf_size = 0;
286 int ret = 0, i = 0;
287 unsigned int *bio_ptr = NULL;
288 struct bio *bio = NULL;
289
290 if (!ptd)
291 return -ENODEV;
292
293 rw_flags = direction;
294
295 rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
296 if (!rq) {
297 test_pr_err("%s: Failed to allocate a request", __func__);
298 return -ENODEV;
299 }
300
301 test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
302 if (!test_rq) {
303 test_pr_err("%s: Failed to allocate test request", __func__);
304 blk_put_request(rq);
305 return -ENODEV;
306 }
307
308 buf_size = sizeof(unsigned int) * BIO_U32_SIZE * num_bios;
309 test_rq->bios_buffer = kzalloc(buf_size, GFP_KERNEL);
310 if (!test_rq->bios_buffer) {
311 test_pr_err("%s: Failed to allocate the data buf", __func__);
312 goto err;
313 }
314 test_rq->buf_size = buf_size;
315
316 if (direction == WRITE)
317 fill_buf_with_pattern(test_rq->bios_buffer,
318 buf_size, pattern);
319 test_rq->wr_rd_data_pattern = pattern;
320
321 bio_ptr = test_rq->bios_buffer;
322 for (i = 0; i < num_bios; ++i) {
323 ret = blk_rq_map_kern(ptd->req_q, rq,
324 (void *)bio_ptr,
325 sizeof(unsigned int)*BIO_U32_SIZE,
326 GFP_KERNEL);
327 if (ret) {
328 test_pr_err("%s: blk_rq_map_kern returned error %d",
329 __func__, ret);
330 goto err;
331 }
332 bio_ptr += BIO_U32_SIZE;
333 }
334
335 if (end_req_io)
336 rq->end_io = end_req_io;
337 else
338 rq->end_io = end_test_req;
339 rq->__sector = start_sec;
340 rq->cmd_type |= REQ_TYPE_FS;
341
342 if (rq->bio) {
343 rq->bio->bi_sector = start_sec;
344 rq->bio->bi_end_io = end_test_bio;
345 bio = rq->bio;
346 while ((bio = bio->bi_next) != NULL)
347 bio->bi_end_io = end_test_bio;
348 }
349
350 ptd->num_of_write_bios += num_bios;
351 test_rq->req_id = ptd->wr_rd_next_req_id++;
352
Maya Erezdb8cbfe2012-07-15 13:09:08 +0300353 test_rq->req_completed = false;
354 test_rq->req_result = -EINVAL;
Maya Erez60181552012-06-27 11:25:26 +0300355 test_rq->rq = rq;
356 test_rq->is_err_expected = is_err_expcted;
357 rq->elv.priv[0] = (void *)test_rq;
358
359 test_pr_debug(
360 "%s: added request %d to the test requests list, buf_size=%d",
361 __func__, test_rq->req_id, buf_size);
362
363 list_add_tail(&test_rq->queuelist, &ptd->test_queue);
364
365 return 0;
366err:
367 blk_put_request(rq);
368 kfree(test_rq->bios_buffer);
369 return -ENODEV;
370}
371EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
372
373/* Converts the testcase number into a string */
374static char *get_test_case_str(struct test_data *td)
375{
376 if (td->test_info.get_test_case_str_fn)
377 return td->test_info.get_test_case_str_fn(td);
378
379 return "Unknown testcase";
380}
381
382/*
383 * Verify that the test request data buffer includes the expected
384 * pattern
385 */
386static int compare_buffer_to_pattern(struct test_request *test_rq)
387{
388 int i = 0;
389 int num_of_dwords = test_rq->buf_size/sizeof(int);
390
391 /* num_bytes should be aligned to sizeof(int) */
392 BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
393 BUG_ON(test_rq->bios_buffer == NULL);
394
395 if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
396 return 0;
397
398 if (test_rq->wr_rd_data_pattern == TEST_PATTERN_SEQUENTIAL) {
399 for (i = 0; i < num_of_dwords; i++) {
400 if (test_rq->bios_buffer[i] != i) {
401 test_pr_err(
402 "%s: wrong pattern 0x%x in index %d",
403 __func__, test_rq->bios_buffer[i], i);
404 return -EINVAL;
405 }
406 }
407 } else {
408 for (i = 0; i < num_of_dwords; i++) {
409 if (test_rq->bios_buffer[i] !=
410 test_rq->wr_rd_data_pattern) {
411 test_pr_err(
412 "%s: wrong pattern 0x%x in index %d",
413 __func__, test_rq->bios_buffer[i], i);
414 return -EINVAL;
415 }
416 }
417 }
418
419 return 0;
420}
421
422/*
423 * Determine if the test passed or failed.
424 * The function checks the test request completion value and calls
425 * check_testcase_result for result checking that are specific
426 * to a test case.
427 */
428static int check_test_result(struct test_data *td)
429{
430 struct test_request *test_rq;
431 struct request *rq;
432 int res = 0;
433 static int run;
434
435 list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
436 rq = test_rq->rq;
437 if (!test_rq->req_completed) {
438 test_pr_err("%s: rq %d not completed", __func__,
439 test_rq->req_id);
440 res = -EINVAL;
441 goto err;
442 }
443
444 if ((test_rq->req_result < 0) && !test_rq->is_err_expected) {
445 test_pr_err(
446 "%s: rq %d completed with err, not as expected",
447 __func__, test_rq->req_id);
448 res = -EINVAL;
449 goto err;
450 }
451 if ((test_rq->req_result == 0) && test_rq->is_err_expected) {
452 test_pr_err("%s: rq %d succeeded, not as expected",
453 __func__, test_rq->req_id);
454 res = -EINVAL;
455 goto err;
456 }
457 if (rq_data_dir(test_rq->rq) == READ) {
458 res = compare_buffer_to_pattern(test_rq);
459 if (res) {
460 test_pr_err("%s: read pattern not as expected",
461 __func__);
462 res = -EINVAL;
463 goto err;
464 }
465 }
466 }
467
468 if (td->test_info.check_test_result_fn) {
469 res = td->test_info.check_test_result_fn(td);
470 if (res)
471 goto err;
472 }
473
474 test_pr_info("%s: %s, run# %03d, PASSED",
475 __func__, get_test_case_str(td), ++run);
476 td->test_result = TEST_PASSED;
477
478 return 0;
479err:
480 test_pr_err("%s: %s, run# %03d, FAILED",
481 __func__, get_test_case_str(td), ++run);
482 td->test_result = TEST_FAILED;
483 return res;
484}
485
486/* Create and queue the required requests according to the test case */
487static int prepare_test(struct test_data *td)
488{
489 int ret = 0;
490
491 if (td->test_info.prepare_test_fn) {
492 ret = td->test_info.prepare_test_fn(td);
493 return ret;
494 }
495
496 return 0;
497}
498
499/* Run the test */
500static int run_test(struct test_data *td)
501{
502 int ret = 0;
503
504 if (td->test_info.run_test_fn) {
505 ret = td->test_info.run_test_fn(td);
506 return ret;
507 }
508
509 /*
510 * Set the next_req pointer to the first request in the test requests
511 * list
512 */
513 if (!list_empty(&td->test_queue))
514 td->next_req = list_entry(td->test_queue.next,
515 struct test_request, queuelist);
516 __blk_run_queue(td->req_q);
517
518 return 0;
519}
520
/*
 * Free the allocated test requests, their block requests and BIO
 * buffers.  For requests that never completed, the attached BIOs are
 * also released here, since their completion callback never ran.
 */
static void free_test_requests(struct test_data *td)
{
	struct test_request *test_rq;
	struct bio *bio;

	while (!list_empty(&td->test_queue)) {
		test_rq = list_entry(td->test_queue.next, struct test_request,
				queuelist);
		list_del_init(&test_rq->queuelist);
		/*
		 * If the request was not completed we need to free its BIOs
		 * and remove it from the packed list
		 */
		if (!test_rq->req_completed) {
			test_pr_info(
				"%s: Freeing memory of an uncompleted request",
				__func__);
			/* Detach the block request from any dispatch list */
			list_del_init(&test_rq->rq->queuelist);
			/* Unlink and drop each bio still attached to the rq */
			while ((bio = test_rq->rq->bio) != NULL) {
				test_rq->rq->bio = bio->bi_next;
				bio_put(bio);
			}
		}
		blk_put_request(test_rq->rq);
		kfree(test_rq->bios_buffer);
		kfree(test_rq);
	}
}
550
/*
 * Do post test operations.
 * Runs the testcase-specific post_test callback (if any), returns the
 * scheduler to the idle state and frees the allocated test requests,
 * their block requests and BIO buffers.
 */
static int post_test(struct test_data *td)
{
	int ret = 0;

	if (td->test_info.post_test_fn)
		ret = td->test_info.post_test_fn(td);

	/* Back to idle: the dispatch callback resumes serving FS requests */
	ptd->test_info.testcase = 0;
	ptd->test_state = TEST_IDLE;

	free_test_requests(td);

	return ret;
}
569
570/*
571 * The timer verifies that the test will be completed even if we don't get
572 * the completion callback for all the requests.
573 */
574static void test_timeout_handler(unsigned long data)
575{
576 struct test_data *td = (struct test_data *)data;
577
578 test_pr_info("%s: TIMEOUT timer expired", __func__);
579 td->test_state = TEST_COMPLETED;
580 wake_up(&td->wait_q);
581 return;
582}
583
584static unsigned int get_timeout_msec(struct test_data *td)
585{
586 if (td->test_info.timeout_msec)
587 return td->test_info.timeout_msec;
588 else
589 return TIMEOUT_TIMER_MS;
590}
591
/**
 * test_iosched_start_test() - Prepares and runs the test.
 * @t_info:	the current test testcase and callbacks
 *		functions
 *
 * Runs the testcase, waits for its completion (or the timeout timer)
 * and checks the result.  A round disturbed by FS write requests
 * (ignore_round) is re-run, up to TEST_MAX_TESTCASE_ROUNDS times.
 *
 * Returns 0 when the test passed; -ENODEV/-EINVAL/-EBUSY on setup
 * failures; -EINVAL when the test did not pass.
 */
int test_iosched_start_test(struct test_info *t_info)
{
	int ret = 0;
	unsigned timeout_msec;
	int counter = 0;
	char *test_name = NULL;

	if (!ptd)
		return -ENODEV;

	if (!t_info) {
		ptd->test_result = TEST_FAILED;
		return -EINVAL;
	}

	do {
		if (ptd->ignore_round)
			/*
			 * We ignored the last run due to FS write requests.
			 * Sleep to allow those requests to be issued
			 */
			msleep(2000);

		spin_lock(&ptd->lock);

		/* Only one test may run at a time */
		if (ptd->test_state != TEST_IDLE) {
			test_pr_info(
				"%s: Another test is running, try again later",
				__func__);
			spin_unlock(&ptd->lock);
			return -EBUSY;
		}

		/* start_sector is configured via debugfs; 0 means unset */
		if (ptd->start_sector == 0) {
			test_pr_err("%s: Invalid start sector", __func__);
			ptd->test_result = TEST_FAILED;
			spin_unlock(&ptd->lock);
			return -EINVAL;
		}

		memcpy(&ptd->test_info, t_info, sizeof(struct test_info));

		/* Reset per-round bookkeeping */
		ptd->next_req = NULL;
		ptd->test_result = TEST_NO_RESULT;
		ptd->num_of_write_bios = 0;

		ptd->unique_next_req_id = UNIQUE_START_REQ_ID;
		ptd->wr_rd_next_req_id = WR_RD_START_REQ_ID;

		ptd->ignore_round = false;
		ptd->fs_wr_reqs_during_test = false;

		ptd->test_state = TEST_RUNNING;

		spin_unlock(&ptd->lock);

		/* Arm the watchdog that forces completion on a hang */
		timeout_msec = get_timeout_msec(ptd);
		mod_timer(&ptd->timeout_timer, jiffies +
			msecs_to_jiffies(timeout_msec));

		if (ptd->test_info.get_test_case_str_fn)
			test_name = ptd->test_info.get_test_case_str_fn(ptd);
		else
			test_name = "Unknown testcase";
		test_pr_info("%s: Starting test %s\n", __func__, test_name);

		ret = prepare_test(ptd);
		if (ret) {
			test_pr_err("%s: failed to prepare the test\n",
				__func__);
			goto error;
		}

		ret = run_test(ptd);
		if (ret) {
			test_pr_err("%s: failed to run the test\n", __func__);
			goto error;
		}

		test_pr_info("%s: Waiting for the test completion", __func__);

		/* Woken by the last request completion or by the timer */
		wait_event(ptd->wait_q, ptd->test_state == TEST_COMPLETED);
		del_timer_sync(&ptd->timeout_timer);

		ret = check_test_result(ptd);
		if (ret) {
			test_pr_err("%s: check_test_result failed\n",
				__func__);
			goto error;
		}

		ret = post_test(ptd);
		if (ret) {
			test_pr_err("%s: post_test failed\n", __func__);
			goto error;
		}

		/*
		 * Wake up the queue thread to fetch FS requests that might
		 * have been postponed due to the test
		 */
		__blk_run_queue(ptd->req_q);

		if (ptd->ignore_round)
			test_pr_info(
				"%s: Round canceled (Got wr reqs in the middle)",
				__func__);

		if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
			test_pr_info("%s: Too many rounds, did not succeed...",
				__func__);
			ptd->test_result = TEST_FAILED;
		}

	} while ((ptd->ignore_round) && (counter < TEST_MAX_TESTCASE_ROUNDS));

	if (ptd->test_result == TEST_PASSED)
		return 0;
	else
		return -EINVAL;

error:
	post_test(ptd);
	ptd->test_result = TEST_FAILED;
	return ret;
}
EXPORT_SYMBOL(test_iosched_start_test);
726
/**
 * test_iosched_register() - register a block device test
 * utility.
 * @bdt:	the block device test type to register
 *
 * Registered utilities have their init_fn/exit_fn invoked when the
 * scheduler is attached to / detached from a request queue.
 */
void test_iosched_register(struct blk_dev_test_type *bdt)
{
	spin_lock(&blk_dev_test_list_lock);
	list_add_tail(&bdt->list, &blk_dev_test_list);
	spin_unlock(&blk_dev_test_list_lock);
}
EXPORT_SYMBOL_GPL(test_iosched_register);
739
/**
 * test_iosched_unregister() - unregister a block device test
 * utility.
 * @bdt:	the block device test type to unregister
 */
void test_iosched_unregister(struct blk_dev_test_type *bdt)
{
	spin_lock(&blk_dev_test_list_lock);
	list_del_init(&bdt->list);
	spin_unlock(&blk_dev_test_list_lock);
}
EXPORT_SYMBOL_GPL(test_iosched_unregister);
752
753/**
754 * test_iosched_set_test_result() - Set the test
755 * result(PASS/FAIL)
756 * @test_result: the test result
757 */
758void test_iosched_set_test_result(int test_result)
759{
760 if (!ptd)
761 return;
762
763 ptd->test_result = test_result;
764}
765EXPORT_SYMBOL(test_iosched_set_test_result);
766
767
768/**
769 * test_iosched_set_ignore_round() - Set the ignore_round flag
770 * @ignore_round: A flag to indicate if this test round
771 * should be ignored and re-run
772 */
773void test_iosched_set_ignore_round(bool ignore_round)
774{
775 if (!ptd)
776 return;
777
778 ptd->ignore_round = ignore_round;
779}
780EXPORT_SYMBOL(test_iosched_set_ignore_round);
781
782/**
783 * test_iosched_get_debugfs_tests_root() - returns the root
784 * debugfs directory for the test_iosched tests
785 */
786struct dentry *test_iosched_get_debugfs_tests_root(void)
787{
788 if (!ptd)
789 return NULL;
790
791 return ptd->debug.debug_tests_root;
792}
793EXPORT_SYMBOL(test_iosched_get_debugfs_tests_root);
794
795/**
796 * test_iosched_get_debugfs_utils_root() - returns the root
797 * debugfs directory for the test_iosched utils
798 */
799struct dentry *test_iosched_get_debugfs_utils_root(void)
800{
801 if (!ptd)
802 return NULL;
803
804 return ptd->debug.debug_utils_root;
805}
806EXPORT_SYMBOL(test_iosched_get_debugfs_utils_root);
807
808static int test_debugfs_init(struct test_data *td)
809{
810 td->debug.debug_root = debugfs_create_dir("test-iosched", NULL);
811 if (!td->debug.debug_root)
812 return -ENOENT;
813
814 td->debug.debug_tests_root = debugfs_create_dir("tests",
815 td->debug.debug_root);
816 if (!td->debug.debug_tests_root)
817 goto err;
818
819 td->debug.debug_utils_root = debugfs_create_dir("utils",
820 td->debug.debug_root);
821 if (!td->debug.debug_utils_root)
822 goto err;
823
824 td->debug.debug_test_result = debugfs_create_u32(
825 "test_result",
826 S_IRUGO | S_IWUGO,
827 td->debug.debug_utils_root,
828 &td->test_result);
829 if (!td->debug.debug_test_result)
830 goto err;
831
832 td->debug.start_sector = debugfs_create_u32(
833 "start_sector",
834 S_IRUGO | S_IWUGO,
835 td->debug.debug_utils_root,
836 &td->start_sector);
837 if (!td->debug.start_sector)
838 goto err;
839
840 return 0;
841
842err:
843 debugfs_remove_recursive(td->debug.debug_root);
844 return -ENOENT;
845}
846
/* Remove the entire test-iosched debugfs hierarchy */
static void test_debugfs_cleanup(struct test_data *td)
{
	debugfs_remove_recursive(td->debug.debug_root);
}
851
852static void print_req(struct request *req)
853{
854 struct bio *bio;
855 struct test_request *test_rq;
856
857 if (!req)
858 return;
859
860 test_rq = (struct test_request *)req->elv.priv[0];
861
862 if (test_rq) {
863 test_pr_debug("%s: Dispatch request %d: __sector=0x%lx",
864 __func__, test_rq->req_id, (unsigned long)req->__sector);
865 test_pr_debug("%s: nr_phys_segments=%d, num_of_sectors=%d",
866 __func__, req->nr_phys_segments, blk_rq_sectors(req));
867 bio = req->bio;
868 test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
869 __func__, bio->bi_size,
870 (unsigned long)bio->bi_sector);
871 while ((bio = bio->bi_next) != NULL) {
872 test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
873 __func__, bio->bi_size,
874 (unsigned long)bio->bi_sector);
875 }
876 }
877}
878
/*
 * Elevator merge callback: @next has been merged into @rq, so drop
 * @next from the scheduler's queue.
 */
static void test_merged_requests(struct request_queue *q,
			 struct request *rq, struct request *next)
{
	list_del_init(&next->queuelist);
}
884
885/*
886 * Dispatch a test request in case there is a running test Otherwise, dispatch
887 * a request that was queued by the FS to keep the card functional.
888 */
889static int test_dispatch_requests(struct request_queue *q, int force)
890{
891 struct test_data *td = q->elevator->elevator_data;
892 struct request *rq = NULL;
893
894 switch (td->test_state) {
895 case TEST_IDLE:
896 if (!list_empty(&td->queue)) {
897 rq = list_entry(td->queue.next, struct request,
898 queuelist);
899 list_del_init(&rq->queuelist);
900 elv_dispatch_sort(q, rq);
901 return 1;
902 }
903 break;
904 case TEST_RUNNING:
905 if (td->next_req) {
906 rq = td->next_req->rq;
907 td->next_req =
908 latter_test_request(td->req_q, td->next_req);
909 if (!rq)
910 return 0;
911 print_req(rq);
912 elv_dispatch_sort(q, rq);
913 return 1;
914 }
915 break;
916 case TEST_COMPLETED:
917 default:
918 return 0;
919 }
920
921 return 0;
922}
923
924static void test_add_request(struct request_queue *q, struct request *rq)
925{
926 struct test_data *td = q->elevator->elevator_data;
927
928 list_add_tail(&rq->queuelist, &td->queue);
929
930 /*
931 * The write requests can be followed by a FLUSH request that might
932 * cause unexpected results of the test.
933 */
934 if ((rq_data_dir(rq) == WRITE) && (td->test_state == TEST_RUNNING)) {
935 test_pr_debug("%s: got WRITE req in the middle of the test",
936 __func__);
937 td->fs_wr_reqs_during_test = true;
938 }
939}
940
941static struct request *
942test_former_request(struct request_queue *q, struct request *rq)
943{
944 struct test_data *td = q->elevator->elevator_data;
945
946 if (rq->queuelist.prev == &td->queue)
947 return NULL;
948 return list_entry(rq->queuelist.prev, struct request, queuelist);
949}
950
951static struct request *
952test_latter_request(struct request_queue *q, struct request *rq)
953{
954 struct test_data *td = q->elevator->elevator_data;
955
956 if (rq->queuelist.next == &td->queue)
957 return NULL;
958 return list_entry(rq->queuelist.next, struct request, queuelist);
959}
960
961static void *test_init_queue(struct request_queue *q)
962{
963 struct blk_dev_test_type *__bdt;
964
965 ptd = kmalloc_node(sizeof(struct test_data), GFP_KERNEL,
966 q->node);
967 if (!ptd) {
968 test_pr_err("%s: failed to allocate test data", __func__);
969 return NULL;
970 }
971 memset((void *)ptd, 0, sizeof(struct test_data));
972 INIT_LIST_HEAD(&ptd->queue);
973 INIT_LIST_HEAD(&ptd->test_queue);
974 init_waitqueue_head(&ptd->wait_q);
975 ptd->req_q = q;
976
977 setup_timer(&ptd->timeout_timer, test_timeout_handler,
978 (unsigned long)ptd);
979
980 spin_lock_init(&ptd->lock);
981
982 if (test_debugfs_init(ptd)) {
983 test_pr_err("%s: Failed to create debugfs files", __func__);
984 return NULL;
985 }
986
987 list_for_each_entry(__bdt, &blk_dev_test_list, list)
988 __bdt->init_fn();
989
990 return ptd;
991}
992
993static void test_exit_queue(struct elevator_queue *e)
994{
995 struct test_data *td = e->elevator_data;
996 struct blk_dev_test_type *__bdt;
997
998 BUG_ON(!list_empty(&td->queue));
999
1000 list_for_each_entry(__bdt, &blk_dev_test_list, list)
1001 __bdt->exit_fn();
1002
1003 test_debugfs_cleanup(td);
1004
1005 kfree(td);
1006}
1007
/* test-iosched elevator: callbacks wired into the block elevator core */
static struct elevator_type elevator_test_iosched = {
	.ops = {
		.elevator_merge_req_fn = test_merged_requests,
		.elevator_dispatch_fn = test_dispatch_requests,
		.elevator_add_req_fn = test_add_request,
		.elevator_former_req_fn = test_former_request,
		.elevator_latter_req_fn = test_latter_request,
		.elevator_init_fn = test_init_queue,
		.elevator_exit_fn = test_exit_queue,
	},
	.elevator_name = "test-iosched",
	.elevator_owner = THIS_MODULE,
};
1021
/* Module init: register the test-iosched elevator with the block layer */
static int __init test_init(void)
{
	elv_register(&elevator_test_iosched);

	return 0;
}
1028
/* Module exit: unregister the elevator */
static void __exit test_exit(void)
{
	elv_unregister(&elevator_test_iosched);
}
1033
1034module_init(test_init);
1035module_exit(test_exit);
1036
1037MODULE_LICENSE("GPL v2");
1038MODULE_DESCRIPTION("Test IO scheduler");