/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * The test scheduler allows testing the block device by dispatching
 * specific requests according to the test case and declares PASS/FAIL
 * according to the request completion error codes.
 * Each test is exposed via debugfs and can be triggered by writing to
 * its debugfs file.
 */
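
/*
 * A block device test utility hooks into this scheduler by registering a
 * struct blk_dev_test_type whose init_fn/exit_fn callbacks are invoked when
 * the test-iosched queue is set up and torn down (see test_init_queue() and
 * test_exit_queue() below). A minimal registration sketch; the my_* names
 * and the no-argument/void callback signatures are illustrative assumptions
 * based only on how the callbacks are invoked in this file:
 *
 *	static void my_tests_init(void)
 *	{
 *		pr_info("my-test: create trigger files under the tests root\n");
 *	}
 *
 *	static void my_tests_exit(void)
 *	{
 *		pr_info("my-test: remove the utility's debugfs entries\n");
 *	}
 *
 *	static struct blk_dev_test_type my_bdt = {
 *		.init_fn = my_tests_init,
 *		.exit_fn = my_tests_exit,
 *	};
 *
 *	static int __init my_test_module_init(void)
 *	{
 *		test_iosched_register(&my_bdt);
 *		return 0;
 *	}
 *	module_init(my_test_module_init);
 */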

/* elevator test iosched */
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/test-iosched.h>
#include <linux/delay.h>
#include "blk.h"

#define MODULE_NAME "test-iosched"
#define WR_RD_START_REQ_ID 1234
#define UNIQUE_START_REQ_ID 5678
#define TIMEOUT_TIMER_MS 40000
#define TEST_MAX_TESTCASE_ROUNDS 15

#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)

static DEFINE_SPINLOCK(blk_dev_test_list_lock);
static LIST_HEAD(blk_dev_test_list);
static struct test_data *ptd;

/* Get the request after `test_rq' in the test requests list */
static struct test_request *
latter_test_request(struct request_queue *q,
                    struct test_request *test_rq)
{
        struct test_data *td = q->elevator->elevator_data;

        if (test_rq->queuelist.next == &td->test_queue)
                return NULL;
        return list_entry(test_rq->queuelist.next, struct test_request,
                          queuelist);
}

/**
 * test_iosched_get_req_queue() - returns the request queue
 * served by the scheduler
 */
struct request_queue *test_iosched_get_req_queue(void)
{
        if (!ptd)
                return NULL;

        return ptd->req_q;
}
EXPORT_SYMBOL(test_iosched_get_req_queue);

/**
 * test_iosched_mark_test_completion() - Wake up the debugfs
 * thread waiting on the test completion
 */
void test_iosched_mark_test_completion(void)
{
        if (!ptd)
                return;

        ptd->test_state = TEST_COMPLETED;
        wake_up(&ptd->wait_q);
}
EXPORT_SYMBOL(test_iosched_mark_test_completion);

/* Check if all the queued test requests were completed */
static void check_test_completion(void)
{
        struct test_request *test_rq;
        struct request *rq;

        list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
                rq = test_rq->rq;
                if (!test_rq->req_completed)
                        return;
        }

        ptd->test_info.test_duration = jiffies -
                ptd->test_info.test_duration;

        test_pr_info("%s: Test is completed", __func__);

        test_iosched_mark_test_completion();
}

/*
 * A callback to be called per bio completion.
 * Frees the bio memory.
 */
static void end_test_bio(struct bio *bio, int err)
{
        if (err)
                clear_bit(BIO_UPTODATE, &bio->bi_flags);

        bio_put(bio);
}

/*
 * A callback to be called per request completion.
 * The request memory is not freed here; it will be freed later,
 * after the test results are checked.
 */
static void end_test_req(struct request *rq, int err)
{
        struct test_request *test_rq;

        test_rq = (struct test_request *)rq->elv.priv[0];
        BUG_ON(!test_rq);

        test_pr_debug("%s: request %d completed, err=%d",
                      __func__, test_rq->req_id, err);

        test_rq->req_completed = true;
        test_rq->req_result = err;

        check_test_completion();
}

/**
 * test_iosched_add_unique_test_req() - Create and queue a non
 *      read/write request (such as FLUSH/DISCARD/SANITIZE).
 * @is_err_expcted:     A flag to indicate if this request
 *                      should succeed or not
 * @req_unique:         The type of request to add
 * @start_sec:          start address of the first bio
 * @nr_sects:           number of sectors in the request
 * @end_req_io:         specific completion callback. When not
 *                      set, the default callback will be used
 */
int test_iosched_add_unique_test_req(int is_err_expcted,
                enum req_unique_type req_unique,
                int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
{
        struct bio *bio;
        struct request *rq;
        int rw_flags;
        struct test_request *test_rq;

        if (!ptd)
                return -ENODEV;

        bio = bio_alloc(GFP_KERNEL, 0);
        if (!bio) {
                test_pr_err("%s: Failed to allocate a bio", __func__);
                return -ENODEV;
        }
        bio_get(bio);
        bio->bi_end_io = end_test_bio;

        switch (req_unique) {
        case REQ_UNIQUE_FLUSH:
                bio->bi_rw = WRITE_FLUSH;
                break;
        case REQ_UNIQUE_DISCARD:
                bio->bi_rw = REQ_WRITE | REQ_DISCARD;
                bio->bi_size = nr_sects << 9;
                bio->bi_sector = start_sec;
                break;
        case REQ_UNIQUE_SANITIZE:
                bio->bi_rw = REQ_WRITE | REQ_SANITIZE;
                break;
        default:
                test_pr_err("%s: Invalid request type %d", __func__,
                            req_unique);
                bio_put(bio);
                return -ENODEV;
        }

        rw_flags = bio_data_dir(bio);
        if (bio->bi_rw & REQ_SYNC)
                rw_flags |= REQ_SYNC;

        rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
        if (!rq) {
                test_pr_err("%s: Failed to allocate a request", __func__);
                bio_put(bio);
                return -ENODEV;
        }

        init_request_from_bio(rq, bio);
        if (end_req_io)
                rq->end_io = end_req_io;
        else
                rq->end_io = end_test_req;

        test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
        if (!test_rq) {
                test_pr_err("%s: Failed to allocate a test request", __func__);
                bio_put(bio);
                blk_put_request(rq);
                return -ENODEV;
        }
        test_rq->req_completed = false;
        test_rq->req_result = -EINVAL;
        test_rq->rq = rq;
        test_rq->is_err_expected = is_err_expcted;
        rq->elv.priv[0] = (void *)test_rq;
        test_rq->req_id = ptd->unique_next_req_id++;

        test_pr_debug(
                "%s: added request %d to the test requests list, type = %d",
                __func__, test_rq->req_id, req_unique);

        list_add_tail(&test_rq->queuelist, &ptd->test_queue);

        return 0;
}
EXPORT_SYMBOL(test_iosched_add_unique_test_req);
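
/*
 * Usage sketch for the above: a test's prepare_test_fn might queue a single
 * FLUSH request that is expected to succeed. The zero start_sec/nr_sects
 * values and the NULL completion callback are illustrative assumptions:
 *
 *	ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
 *					       0, 0, NULL);
 *	if (ret)
 *		return ret;
 */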

/*
 * Fill the request data buffer with a given pattern.
 * If the pattern is TEST_PATTERN_SEQUENTIAL the buffer is filled with
 * sequential numbers.
 */
static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
{
        int i = 0;
        int num_of_dwords = num_bytes/sizeof(int);

        if (pattern == TEST_NO_PATTERN)
                return;

        /* num_bytes should be aligned to sizeof(int) */
        BUG_ON((num_bytes % sizeof(int)) != 0);

        if (pattern == TEST_PATTERN_SEQUENTIAL) {
                for (i = 0; i < num_of_dwords; i++)
                        buf[i] = i;
        } else {
                for (i = 0; i < num_of_dwords; i++)
                        buf[i] = pattern;
        }
}

/**
 * test_iosched_add_wr_rd_test_req() - Create and queue a
 *      read/write request.
 * @is_err_expcted:     A flag to indicate if this request
 *                      should succeed or not
 * @direction:          READ/WRITE
 * @start_sec:          start address of the first bio
 * @num_bios:           number of BIOs to be allocated for the
 *                      request
 * @pattern:            A pattern, to be written into the write
 *                      requests data buffer. In case of a READ
 *                      request, the given pattern is kept as
 *                      the expected pattern. The expected
 *                      pattern will be compared in the test
 *                      check result function. If no comparison
 *                      is required, set pattern to
 *                      TEST_NO_PATTERN.
 * @end_req_io:         specific completion callback. When not
 *                      set, the default callback will be used
 *
 * This function allocates the test request and the block
 * request and calls blk_rq_map_kern which allocates the
 * required BIO. The allocated test request and the block
 * request memory is freed at the end of the test and the
 * allocated BIO memory is freed by end_test_bio.
 */
int test_iosched_add_wr_rd_test_req(int is_err_expcted,
                int direction, int start_sec,
                int num_bios, int pattern, rq_end_io_fn *end_req_io)
{
        struct request *rq = NULL;
        struct test_request *test_rq = NULL;
        int rw_flags = 0;
        int buf_size = 0;
        int ret = 0, i = 0;
        unsigned int *bio_ptr = NULL;
        struct bio *bio = NULL;

        if (!ptd)
                return -ENODEV;

        rw_flags = direction;

        rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
        if (!rq) {
                test_pr_err("%s: Failed to allocate a request", __func__);
                return -ENODEV;
        }

        test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
        if (!test_rq) {
                test_pr_err("%s: Failed to allocate test request", __func__);
                blk_put_request(rq);
                return -ENODEV;
        }

        buf_size = sizeof(unsigned int) * BIO_U32_SIZE * num_bios;
        test_rq->bios_buffer = kzalloc(buf_size, GFP_KERNEL);
        if (!test_rq->bios_buffer) {
                test_pr_err("%s: Failed to allocate the data buf", __func__);
                goto err;
        }
        test_rq->buf_size = buf_size;

        if (direction == WRITE)
                fill_buf_with_pattern(test_rq->bios_buffer,
                                      buf_size, pattern);
        test_rq->wr_rd_data_pattern = pattern;

        bio_ptr = test_rq->bios_buffer;
        for (i = 0; i < num_bios; ++i) {
                ret = blk_rq_map_kern(ptd->req_q, rq,
                                      (void *)bio_ptr,
                                      sizeof(unsigned int)*BIO_U32_SIZE,
                                      GFP_KERNEL);
                if (ret) {
                        test_pr_err("%s: blk_rq_map_kern returned error %d",
                                    __func__, ret);
                        goto err;
                }
                bio_ptr += BIO_U32_SIZE;
        }

        if (end_req_io)
                rq->end_io = end_req_io;
        else
                rq->end_io = end_test_req;
        rq->__sector = start_sec;
        rq->cmd_type |= REQ_TYPE_FS;

        if (rq->bio) {
                rq->bio->bi_sector = start_sec;
                rq->bio->bi_end_io = end_test_bio;
                bio = rq->bio;
                while ((bio = bio->bi_next) != NULL)
                        bio->bi_end_io = end_test_bio;
        }

        ptd->num_of_write_bios += num_bios;
        test_rq->req_id = ptd->wr_rd_next_req_id++;

        test_rq->req_completed = false;
        test_rq->req_result = -EINVAL;
        test_rq->rq = rq;
        test_rq->is_err_expected = is_err_expcted;
        rq->elv.priv[0] = (void *)test_rq;

        test_pr_debug(
                "%s: added request %d to the test requests list, buf_size=%d",
                __func__, test_rq->req_id, buf_size);

        list_add_tail(&test_rq->queuelist, &ptd->test_queue);

        return 0;
err:
        blk_put_request(rq);
        kfree(test_rq->bios_buffer);
        /* free the test request itself as well, to avoid leaking it */
        kfree(test_rq);
        return -ENODEV;
}
EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
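
/*
 * Usage sketch for the above: queue one WRITE request built from two BIOs and
 * filled with the sequential pattern, starting at the configured start sector.
 * The BIO count and the NULL completion callback are illustrative assumptions:
 *
 *	ret = test_iosched_add_wr_rd_test_req(0, WRITE, td->start_sector,
 *					      2, TEST_PATTERN_SEQUENTIAL,
 *					      NULL);
 */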

/* Converts the testcase number into a string */
static char *get_test_case_str(struct test_data *td)
{
        if (td->test_info.get_test_case_str_fn)
                return td->test_info.get_test_case_str_fn(td);

        return "Unknown testcase";
}

/*
 * Verify that the test request data buffer includes the expected
 * pattern
 */
static int compare_buffer_to_pattern(struct test_request *test_rq)
{
        int i = 0;
        int num_of_dwords = test_rq->buf_size/sizeof(int);

        /* buf_size should be aligned to sizeof(int) */
        BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
        BUG_ON(test_rq->bios_buffer == NULL);

        if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
                return 0;

        if (test_rq->wr_rd_data_pattern == TEST_PATTERN_SEQUENTIAL) {
                for (i = 0; i < num_of_dwords; i++) {
                        if (test_rq->bios_buffer[i] != i) {
                                test_pr_err(
                                        "%s: wrong pattern 0x%x in index %d",
                                        __func__, test_rq->bios_buffer[i], i);
                                return -EINVAL;
                        }
                }
        } else {
                for (i = 0; i < num_of_dwords; i++) {
                        if (test_rq->bios_buffer[i] !=
                            test_rq->wr_rd_data_pattern) {
                                test_pr_err(
                                        "%s: wrong pattern 0x%x in index %d",
                                        __func__, test_rq->bios_buffer[i], i);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}

/*
 * Determine if the test passed or failed.
 * The function checks the completion value of each test request and then
 * calls the check_test_result_fn callback for result checks that are
 * specific to the test case.
 */
static int check_test_result(struct test_data *td)
{
        struct test_request *test_rq;
        struct request *rq;
        int res = 0;
        static int run;

        list_for_each_entry(test_rq, &ptd->test_queue, queuelist) {
                rq = test_rq->rq;
                if (!test_rq->req_completed) {
                        test_pr_err("%s: rq %d not completed", __func__,
                                    test_rq->req_id);
                        res = -EINVAL;
                        goto err;
                }

                if ((test_rq->req_result < 0) && !test_rq->is_err_expected) {
                        test_pr_err(
                                "%s: rq %d completed with err, not as expected",
                                __func__, test_rq->req_id);
                        res = -EINVAL;
                        goto err;
                }
                if ((test_rq->req_result == 0) && test_rq->is_err_expected) {
                        test_pr_err("%s: rq %d succeeded, not as expected",
                                    __func__, test_rq->req_id);
                        res = -EINVAL;
                        goto err;
                }
                if (rq_data_dir(test_rq->rq) == READ) {
                        res = compare_buffer_to_pattern(test_rq);
                        if (res) {
                                test_pr_err("%s: read pattern not as expected",
                                            __func__);
                                res = -EINVAL;
                                goto err;
                        }
                }
        }

        if (td->test_info.check_test_result_fn) {
                res = td->test_info.check_test_result_fn(td);
                if (res)
                        goto err;
        }

        test_pr_info("%s: %s, run# %03d, PASSED",
                     __func__, get_test_case_str(td), ++run);
        td->test_result = TEST_PASSED;

        return 0;
err:
        test_pr_err("%s: %s, run# %03d, FAILED",
                    __func__, get_test_case_str(td), ++run);
        td->test_result = TEST_FAILED;
        return res;
}

/* Create and queue the required requests according to the test case */
static int prepare_test(struct test_data *td)
{
        int ret = 0;

        if (td->test_info.prepare_test_fn) {
                ret = td->test_info.prepare_test_fn(td);
                return ret;
        }

        return 0;
}

/* Run the test */
static int run_test(struct test_data *td)
{
        int ret = 0;

        if (td->test_info.run_test_fn) {
                ret = td->test_info.run_test_fn(td);
                return ret;
        }

        /*
         * Set the next_req pointer to the first request in the test requests
         * list
         */
        if (!list_empty(&td->test_queue))
                td->next_req = list_entry(td->test_queue.next,
                                          struct test_request, queuelist);
        __blk_run_queue(td->req_q);

        return 0;
}

/* Free the allocated test requests, their block requests and BIO buffers */
static void free_test_requests(struct test_data *td)
{
        struct test_request *test_rq;
        struct bio *bio;

        while (!list_empty(&td->test_queue)) {
                test_rq = list_entry(td->test_queue.next, struct test_request,
                                     queuelist);
                list_del_init(&test_rq->queuelist);
                /*
                 * If the request was not completed we need to free its BIOs
                 * and unlink its block request from the queue it is still on
                 */
                if (!test_rq->req_completed) {
                        test_pr_info(
                                "%s: Freeing memory of an uncompleted request",
                                __func__);
                        list_del_init(&test_rq->rq->queuelist);
                        while ((bio = test_rq->rq->bio) != NULL) {
                                test_rq->rq->bio = bio->bi_next;
                                bio_put(bio);
                        }
                }
                blk_put_request(test_rq->rq);
                kfree(test_rq->bios_buffer);
                kfree(test_rq);
        }
}

/*
 * Do post-test operations:
 * free the allocated test requests, their block requests and BIO buffers.
 */
static int post_test(struct test_data *td)
{
        int ret = 0;

        if (td->test_info.post_test_fn)
                ret = td->test_info.post_test_fn(td);

        ptd->test_info.testcase = 0;
        ptd->test_state = TEST_IDLE;

        free_test_requests(td);

        return ret;
}

/*
 * The timeout timer ensures that the test completes even if a completion
 * callback is not received for all of the requests.
 */
static void test_timeout_handler(unsigned long data)
{
        struct test_data *td = (struct test_data *)data;

        test_pr_info("%s: TIMEOUT timer expired", __func__);
        td->test_state = TEST_COMPLETED;
        wake_up(&td->wait_q);
        return;
}

static unsigned int get_timeout_msec(struct test_data *td)
{
        if (td->test_info.timeout_msec)
                return td->test_info.timeout_msec;
        else
                return TIMEOUT_TIMER_MS;
}

/**
 * test_iosched_start_test() - Prepares and runs the test.
 * @t_info:     the current test case and its callback
 *              functions
 *
 * The function also checks the test result upon test completion
 */
int test_iosched_start_test(struct test_info *t_info)
{
        int ret = 0;
        unsigned timeout_msec;
        int counter = 0;
        char *test_name = NULL;

        if (!ptd)
                return -ENODEV;

        if (!t_info) {
                ptd->test_result = TEST_FAILED;
                return -EINVAL;
        }

        do {
                if (ptd->ignore_round)
                        /*
                         * We ignored the last run due to FS write requests.
                         * Sleep to allow those requests to be issued
                         */
                        msleep(2000);

                spin_lock(&ptd->lock);

                if (ptd->test_state != TEST_IDLE) {
                        test_pr_info(
                                "%s: Another test is running, try again later",
                                __func__);
                        spin_unlock(&ptd->lock);
                        return -EBUSY;
                }

                if (ptd->start_sector == 0) {
                        test_pr_err("%s: Invalid start sector", __func__);
                        ptd->test_result = TEST_FAILED;
                        spin_unlock(&ptd->lock);
                        return -EINVAL;
                }

                memcpy(&ptd->test_info, t_info, sizeof(struct test_info));

                ptd->next_req = NULL;
                ptd->test_result = TEST_NO_RESULT;
                ptd->num_of_write_bios = 0;

                ptd->unique_next_req_id = UNIQUE_START_REQ_ID;
                ptd->wr_rd_next_req_id = WR_RD_START_REQ_ID;

                ptd->ignore_round = false;
                ptd->fs_wr_reqs_during_test = false;

                ptd->test_state = TEST_RUNNING;

                spin_unlock(&ptd->lock);

                timeout_msec = get_timeout_msec(ptd);
                mod_timer(&ptd->timeout_timer, jiffies +
                          msecs_to_jiffies(timeout_msec));

                if (ptd->test_info.get_test_case_str_fn)
                        test_name = ptd->test_info.get_test_case_str_fn(ptd);
                else
                        test_name = "Unknown testcase";
                test_pr_info("%s: Starting test %s", __func__, test_name);

                ret = prepare_test(ptd);
                if (ret) {
                        test_pr_err("%s: failed to prepare the test\n",
                                    __func__);
                        goto error;
                }

                ptd->test_info.test_duration = jiffies;
                ret = run_test(ptd);
                if (ret) {
                        test_pr_err("%s: failed to run the test\n", __func__);
                        goto error;
                }

                test_pr_info("%s: Waiting for the test completion", __func__);

                wait_event(ptd->wait_q, ptd->test_state == TEST_COMPLETED);
                t_info->test_duration = ptd->test_info.test_duration;
                del_timer_sync(&ptd->timeout_timer);

                ret = check_test_result(ptd);
                if (ret) {
                        test_pr_err("%s: check_test_result failed\n",
                                    __func__);
                        goto error;
                }

                ret = post_test(ptd);
                if (ret) {
                        test_pr_err("%s: post_test failed\n", __func__);
                        goto error;
                }

                /*
                 * Wake up the queue to fetch FS requests that might have
                 * been postponed due to the test
                 */
                __blk_run_queue(ptd->req_q);

                if (ptd->ignore_round)
                        test_pr_info(
                                "%s: Round canceled (Got wr reqs in the middle)",
                                __func__);

                if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
                        test_pr_info("%s: Too many rounds, did not succeed...",
                                     __func__);
                        ptd->test_result = TEST_FAILED;
                }

        } while ((ptd->ignore_round) && (counter < TEST_MAX_TESTCASE_ROUNDS));

        if (ptd->test_result == TEST_PASSED)
                return 0;
        else
                return -EINVAL;

error:
        post_test(ptd);
        ptd->test_result = TEST_FAILED;
        return ret;
}
EXPORT_SYMBOL(test_iosched_start_test);
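
/*
 * Usage sketch for the above: a caller (typically the debugfs write handler
 * of a registered test utility) fills a struct test_info with its callbacks
 * and starts the test. The my_* callbacks and the 10 second timeout are
 * illustrative assumptions; unset fields keep the defaults used in this file:
 *
 *	struct test_info ti = {0};
 *	int ret;
 *
 *	ti.testcase = my_testcase_id;
 *	ti.get_test_case_str_fn = my_get_test_case_str;
 *	ti.prepare_test_fn = my_prepare_test;
 *	ti.check_test_result_fn = my_check_test_result;
 *	ti.timeout_msec = 10000;
 *	ret = test_iosched_start_test(&ti);
 */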

/**
 * test_iosched_register() - register a block device test
 * utility.
 * @bdt:        the block device test type to register
 */
void test_iosched_register(struct blk_dev_test_type *bdt)
{
        spin_lock(&blk_dev_test_list_lock);
        list_add_tail(&bdt->list, &blk_dev_test_list);
        spin_unlock(&blk_dev_test_list_lock);
}
EXPORT_SYMBOL_GPL(test_iosched_register);

/**
 * test_iosched_unregister() - unregister a block device test
 * utility.
 * @bdt:        the block device test type to unregister
 */
void test_iosched_unregister(struct blk_dev_test_type *bdt)
{
        spin_lock(&blk_dev_test_list_lock);
        list_del_init(&bdt->list);
        spin_unlock(&blk_dev_test_list_lock);
}
EXPORT_SYMBOL_GPL(test_iosched_unregister);

/**
 * test_iosched_set_test_result() - Set the test
 * result (PASS/FAIL)
 * @test_result:        the test result
 */
void test_iosched_set_test_result(int test_result)
{
        if (!ptd)
                return;

        ptd->test_result = test_result;
}
EXPORT_SYMBOL(test_iosched_set_test_result);

/**
 * test_iosched_set_ignore_round() - Set the ignore_round flag
 * @ignore_round:       A flag to indicate if this test round
 *                      should be ignored and re-run
 */
void test_iosched_set_ignore_round(bool ignore_round)
{
        if (!ptd)
                return;

        ptd->ignore_round = ignore_round;
}
EXPORT_SYMBOL(test_iosched_set_ignore_round);

/**
 * test_iosched_get_debugfs_tests_root() - returns the root
 * debugfs directory for the test_iosched tests
 */
struct dentry *test_iosched_get_debugfs_tests_root(void)
{
        if (!ptd)
                return NULL;

        return ptd->debug.debug_tests_root;
}
EXPORT_SYMBOL(test_iosched_get_debugfs_tests_root);

/**
 * test_iosched_get_debugfs_utils_root() - returns the root
 * debugfs directory for the test_iosched utils
 */
struct dentry *test_iosched_get_debugfs_utils_root(void)
{
        if (!ptd)
                return NULL;

        return ptd->debug.debug_utils_root;
}
EXPORT_SYMBOL(test_iosched_get_debugfs_utils_root);
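
/*
 * For example, a registered utility would typically create its test trigger
 * files under the "tests" directory returned above. The "my_test" name and
 * the my_test_ops file operations are illustrative assumptions:
 *
 *	debugfs_create_file("my_test", S_IRUGO | S_IWUGO,
 *			    test_iosched_get_debugfs_tests_root(),
 *			    NULL, &my_test_ops);
 */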

static int test_debugfs_init(struct test_data *td)
{
        td->debug.debug_root = debugfs_create_dir("test-iosched", NULL);
        if (!td->debug.debug_root)
                return -ENOENT;

        td->debug.debug_tests_root = debugfs_create_dir("tests",
                                                        td->debug.debug_root);
        if (!td->debug.debug_tests_root)
                goto err;

        td->debug.debug_utils_root = debugfs_create_dir("utils",
                                                        td->debug.debug_root);
        if (!td->debug.debug_utils_root)
                goto err;

        td->debug.debug_test_result = debugfs_create_u32(
                                        "test_result",
                                        S_IRUGO | S_IWUGO,
                                        td->debug.debug_utils_root,
                                        &td->test_result);
        if (!td->debug.debug_test_result)
                goto err;

        td->debug.start_sector = debugfs_create_u32(
                                        "start_sector",
                                        S_IRUGO | S_IWUGO,
                                        td->debug.debug_utils_root,
                                        &td->start_sector);
        if (!td->debug.start_sector)
                goto err;

        return 0;

err:
        debugfs_remove_recursive(td->debug.debug_root);
        return -ENOENT;
}

static void test_debugfs_cleanup(struct test_data *td)
{
        debugfs_remove_recursive(td->debug.debug_root);
}

static void print_req(struct request *req)
{
        struct bio *bio;
        struct test_request *test_rq;

        if (!req)
                return;

        test_rq = (struct test_request *)req->elv.priv[0];

        if (test_rq) {
                test_pr_debug("%s: Dispatch request %d: __sector=0x%lx",
                        __func__, test_rq->req_id, (unsigned long)req->__sector);
                test_pr_debug("%s: nr_phys_segments=%d, num_of_sectors=%d",
                        __func__, req->nr_phys_segments, blk_rq_sectors(req));
                bio = req->bio;
                test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
                        __func__, bio->bi_size,
                        (unsigned long)bio->bi_sector);
                while ((bio = bio->bi_next) != NULL) {
                        test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
                                __func__, bio->bi_size,
                                (unsigned long)bio->bi_sector);
                }
        }
}

static void test_merged_requests(struct request_queue *q,
                struct request *rq, struct request *next)
{
        list_del_init(&next->queuelist);
}

/*
 * Dispatch a test request when a test is running. Otherwise, dispatch
 * a request that was queued by the FS to keep the card functional.
 */
static int test_dispatch_requests(struct request_queue *q, int force)
{
        struct test_data *td = q->elevator->elevator_data;
        struct request *rq = NULL;

        switch (td->test_state) {
        case TEST_IDLE:
                if (!list_empty(&td->queue)) {
                        rq = list_entry(td->queue.next, struct request,
                                        queuelist);
                        list_del_init(&rq->queuelist);
                        elv_dispatch_sort(q, rq);
                        return 1;
                }
                break;
        case TEST_RUNNING:
                if (td->next_req) {
                        rq = td->next_req->rq;
                        td->next_req =
                                latter_test_request(td->req_q, td->next_req);
                        if (!rq)
                                return 0;
                        print_req(rq);
                        elv_dispatch_sort(q, rq);
                        return 1;
                }
                break;
        case TEST_COMPLETED:
        default:
                return 0;
        }

        return 0;
}

static void test_add_request(struct request_queue *q, struct request *rq)
{
        struct test_data *td = q->elevator->elevator_data;

        list_add_tail(&rq->queuelist, &td->queue);

        /*
         * Write requests can be followed by a FLUSH request that might
         * cause unexpected results of the test.
         */
        if ((rq_data_dir(rq) == WRITE) && (td->test_state == TEST_RUNNING)) {
                test_pr_debug("%s: got WRITE req in the middle of the test",
                              __func__);
                td->fs_wr_reqs_during_test = true;
        }
}

static struct request *
test_former_request(struct request_queue *q, struct request *rq)
{
        struct test_data *td = q->elevator->elevator_data;

        if (rq->queuelist.prev == &td->queue)
                return NULL;
        return list_entry(rq->queuelist.prev, struct request, queuelist);
}

static struct request *
test_latter_request(struct request_queue *q, struct request *rq)
{
        struct test_data *td = q->elevator->elevator_data;

        if (rq->queuelist.next == &td->queue)
                return NULL;
        return list_entry(rq->queuelist.next, struct request, queuelist);
}

static void *test_init_queue(struct request_queue *q)
{
        struct blk_dev_test_type *__bdt;

        ptd = kmalloc_node(sizeof(struct test_data), GFP_KERNEL,
                           q->node);
        if (!ptd) {
                test_pr_err("%s: failed to allocate test data", __func__);
                return NULL;
        }
        memset((void *)ptd, 0, sizeof(struct test_data));
        INIT_LIST_HEAD(&ptd->queue);
        INIT_LIST_HEAD(&ptd->test_queue);
        init_waitqueue_head(&ptd->wait_q);
        ptd->req_q = q;

        setup_timer(&ptd->timeout_timer, test_timeout_handler,
                    (unsigned long)ptd);

        spin_lock_init(&ptd->lock);

        if (test_debugfs_init(ptd)) {
                test_pr_err("%s: Failed to create debugfs files", __func__);
                /* don't leak the test data if debugfs setup fails */
                kfree(ptd);
                ptd = NULL;
                return NULL;
        }

        list_for_each_entry(__bdt, &blk_dev_test_list, list)
                __bdt->init_fn();

        return ptd;
}

static void test_exit_queue(struct elevator_queue *e)
{
        struct test_data *td = e->elevator_data;
        struct blk_dev_test_type *__bdt;

        BUG_ON(!list_empty(&td->queue));

        list_for_each_entry(__bdt, &blk_dev_test_list, list)
                __bdt->exit_fn();

        test_debugfs_cleanup(td);

        kfree(td);
}

static struct elevator_type elevator_test_iosched = {
        .ops = {
                .elevator_merge_req_fn = test_merged_requests,
                .elevator_dispatch_fn = test_dispatch_requests,
                .elevator_add_req_fn = test_add_request,
                .elevator_former_req_fn = test_former_request,
                .elevator_latter_req_fn = test_latter_request,
                .elevator_init_fn = test_init_queue,
                .elevator_exit_fn = test_exit_queue,
        },
        .elevator_name = "test-iosched",
        .elevator_owner = THIS_MODULE,
};

static int __init test_init(void)
{
        elv_register(&elevator_test_iosched);

        return 0;
}

static void __exit test_exit(void)
{
        elv_unregister(&elevator_test_iosched);
}

module_init(test_init);
module_exit(test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Test IO scheduler");