blob: 12f56bd7b78d6758f870b175948fd94bfa6f0726 [file] [log] [blame]
Maya Erez60181552012-06-27 11:25:26 +03001/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * The test scheduler allows to test the block device by dispatching
13 * specific requests according to the test case and declare PASS/FAIL
14 * according to the requests completion error code.
15 * Each test is exposed via debugfs and can be triggered by writing to
16 * the debugfs file.
17 *
18 */
19
20/* elevator test iosched */
21#include <linux/blkdev.h>
22#include <linux/elevator.h>
23#include <linux/bio.h>
24#include <linux/module.h>
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/debugfs.h>
28#include <linux/test-iosched.h>
29#include <linux/delay.h>
30#include "blk.h"
31
32#define MODULE_NAME "test-iosched"
33#define WR_RD_START_REQ_ID 1234
34#define UNIQUE_START_REQ_ID 5678
35#define TIMEOUT_TIMER_MS 40000
36#define TEST_MAX_TESTCASE_ROUNDS 15
37
38#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
39#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
40#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
41
42static DEFINE_SPINLOCK(blk_dev_test_list_lock);
43static LIST_HEAD(blk_dev_test_list);
44static struct test_data *ptd;
45
Maya Erez60181552012-06-27 11:25:26 +030046
Maya Erez60181552012-06-27 11:25:26 +030047
48/**
49 * test_iosched_get_req_queue() - returns the request queue
50 * served by the scheduler
51 */
52struct request_queue *test_iosched_get_req_queue(void)
53{
54 if (!ptd)
55 return NULL;
56
57 return ptd->req_q;
58}
59EXPORT_SYMBOL(test_iosched_get_req_queue);
60
61/**
62 * test_iosched_mark_test_completion() - Wakeup the debugfs
63 * thread, waiting on the test completion
64 */
65void test_iosched_mark_test_completion(void)
66{
67 if (!ptd)
68 return;
Lee Susman1199b4c2012-12-19 14:19:30 +020069 test_pr_info("%s: mark test is completed, test_count=%d,",
70 __func__, ptd->test_count);
71 test_pr_info("%s: reinsert_count=%d, dispatched_count=%d",
72 __func__, ptd->reinsert_count, ptd->dispatched_count);
Maya Erez60181552012-06-27 11:25:26 +030073
74 ptd->test_state = TEST_COMPLETED;
75 wake_up(&ptd->wait_q);
76}
77EXPORT_SYMBOL(test_iosched_mark_test_completion);
78
/*
 * Check if all the queued test requests were completed.
 *
 * Called from end_test_req(); once every dispatched request has
 * completed and no request remains on any scheduler queue, it turns
 * test_duration from a start timestamp into an elapsed time and
 * signals the waiting thread via test_iosched_mark_test_completion().
 */
static void check_test_completion(void)
{
	struct test_request *test_rq;

	if (!ptd)
		return;

	/* Any dispatched request still in flight means we are not done */
	list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist)
		if (!test_rq->req_completed)
			return;

	/* Requests still waiting on one of the scheduler queues? */
	if (!list_empty(&ptd->test_queue)
		|| !list_empty(&ptd->reinsert_queue)
		|| !list_empty(&ptd->urgent_queue)) {
		test_pr_info("%s: Test still not completed,", __func__);
		test_pr_info("%s: test_count=%d, reinsert_count=%d",
			__func__, ptd->test_count, ptd->reinsert_count);
		test_pr_info("%s: dispatched_count=%d, urgent_count=%d",
			__func__, ptd->dispatched_count, ptd->urgent_count);
		return;
	}

	/*
	 * test_duration holds the start jiffies (set in
	 * test_iosched_start_test); from here on it is the elapsed time.
	 */
	ptd->test_info.test_duration = jiffies -
		ptd->test_info.test_duration;

	test_pr_info("%s: Test is completed, test_count=%d, reinsert_count=%d,",
		__func__, ptd->test_count, ptd->reinsert_count);
	test_pr_info("%s: dispatched_count=%d",
		__func__, ptd->dispatched_count);

	test_iosched_mark_test_completion();
}
112
113/*
114 * A callback to be called per bio completion.
115 * Frees the bio memory.
116 */
117static void end_test_bio(struct bio *bio, int err)
118{
119 if (err)
120 clear_bit(BIO_UPTODATE, &bio->bi_flags);
Maya Erez60181552012-06-27 11:25:26 +0300121 bio_put(bio);
122}
123
124/*
125 * A callback to be called per request completion.
126 * the request memory is not freed here, will be freed later after the test
127 * results checking.
128 */
129static void end_test_req(struct request *rq, int err)
130{
131 struct test_request *test_rq;
132
133 test_rq = (struct test_request *)rq->elv.priv[0];
134 BUG_ON(!test_rq);
135
Lee Susmanf18263a2012-10-24 14:14:37 +0200136 test_pr_debug("%s: request %d completed, err=%d",
Maya Erez60181552012-06-27 11:25:26 +0300137 __func__, test_rq->req_id, err);
138
Maya Erezdb8cbfe2012-07-15 13:09:08 +0300139 test_rq->req_completed = true;
Maya Erez60181552012-06-27 11:25:26 +0300140 test_rq->req_result = err;
141
142 check_test_completion();
143}
144
/**
 * test_iosched_add_unique_test_req() - Create and queue a non
 * read/write request (such as FLUSH/DISCARD/SANITIZE).
 * @is_err_expcted:	A flag to indicate if this request
 *			should succeed or not
 * @req_unique:		The type of request to add
 * @start_sec:		start address of the first bio
 * @nr_sects:		number of sectors in the request
 * @end_req_io:		specific completion callback. When not
 *			set, the default callback will be used
 *
 * Returns 0 on success, -ENODEV when the test context is missing, an
 * allocation fails or @req_unique is not a supported type.
 */
int test_iosched_add_unique_test_req(int is_err_expcted,
			enum req_unique_type req_unique,
			int start_sec, int nr_sects, rq_end_io_fn *end_req_io)
{
	struct bio *bio;
	struct request *rq;
	int rw_flags;
	struct test_request *test_rq;

	if (!ptd)
		return -ENODEV;

	/* Zero-vector bio; the operation is encoded in bi_rw below */
	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio) {
		test_pr_err("%s: Failed to allocate a bio", __func__);
		return -ENODEV;
	}
	/*
	 * NOTE(review): this takes an extra reference on top of the one
	 * bio_alloc() returned; end_test_bio() drops a single reference at
	 * completion.  Presumably the second reference is consumed elsewhere
	 * by the block layer -- confirm against this kernel's bio refcount
	 * rules.
	 */
	bio_get(bio);
	bio->bi_end_io = end_test_bio;

	switch (req_unique) {
	case REQ_UNIQUE_FLUSH:
		bio->bi_rw = WRITE_FLUSH;
		break;
	case REQ_UNIQUE_DISCARD:
		bio->bi_rw = REQ_WRITE | REQ_DISCARD;
		bio->bi_size = nr_sects << 9;	/* sectors -> bytes */
		bio->bi_sector = start_sec;
		break;
	case REQ_UNIQUE_SANITIZE:
		bio->bi_rw = REQ_WRITE | REQ_SANITIZE;
		break;
	default:
		test_pr_err("%s: Invalid request type %d", __func__,
			    req_unique);
		bio_put(bio);
		return -ENODEV;
	}

	rw_flags = bio_data_dir(bio);
	if (bio->bi_rw & REQ_SYNC)
		rw_flags |= REQ_SYNC;

	rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
	if (!rq) {
		test_pr_err("%s: Failed to allocate a request", __func__);
		bio_put(bio);
		return -ENODEV;
	}

	init_request_from_bio(rq, bio);
	/* Use the caller's completion handler when provided */
	if (end_req_io)
		rq->end_io = end_req_io;
	else
		rq->end_io = end_test_req;

	test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
	if (!test_rq) {
		test_pr_err("%s: Failed to allocate a test request", __func__);
		bio_put(bio);
		blk_put_request(rq);
		return -ENODEV;
	}
	test_rq->req_completed = false;
	test_rq->req_result = -EINVAL;	/* overwritten on completion */
	test_rq->rq = rq;
	test_rq->is_err_expected = is_err_expcted;
	/* Back-pointer used by end_test_req() / print_req() */
	rq->elv.priv[0] = (void *)test_rq;
	test_rq->req_id = ptd->unique_next_req_id++;

	test_pr_debug(
		"%s: added request %d to the test requests list, type = %d",
		__func__, test_rq->req_id, req_unique);

	/* queue_lock protects test_queue and its counter */
	spin_lock_irq(ptd->req_q->queue_lock);
	list_add_tail(&test_rq->queuelist, &ptd->test_queue);
	ptd->test_count++;
	spin_unlock_irq(ptd->req_q->queue_lock);

	return 0;
}
EXPORT_SYMBOL(test_iosched_add_unique_test_req);
238
239/*
240 * Get a pattern to be filled in the request data buffer.
241 * If the pattern used is (-1) the buffer will be filled with sequential
242 * numbers
243 */
244static void fill_buf_with_pattern(int *buf, int num_bytes, int pattern)
245{
246 int i = 0;
247 int num_of_dwords = num_bytes/sizeof(int);
248
249 if (pattern == TEST_NO_PATTERN)
250 return;
251
252 /* num_bytes should be aligned to sizeof(int) */
253 BUG_ON((num_bytes % sizeof(int)) != 0);
254
255 if (pattern == TEST_PATTERN_SEQUENTIAL) {
256 for (i = 0; i < num_of_dwords; i++)
257 buf[i] = i;
258 } else {
259 for (i = 0; i < num_of_dwords; i++)
260 buf[i] = pattern;
261 }
262}
263
264/**
Lee Susman1199b4c2012-12-19 14:19:30 +0200265 * test_iosched_create_test_req() - Create a read/write request.
Maya Erez60181552012-06-27 11:25:26 +0300266 * @is_err_expcted: A flag to indicate if this request
267 * should succeed or not
268 * @direction: READ/WRITE
269 * @start_sec: start address of the first bio
270 * @num_bios: number of BIOs to be allocated for the
271 * request
272 * @pattern: A pattern, to be written into the write
273 * requests data buffer. In case of READ
274 * request, the given pattern is kept as
275 * the expected pattern. The expected
276 * pattern will be compared in the test
277 * check result function. If no comparisson
278 * is required, set pattern to
279 * TEST_NO_PATTERN.
280 * @end_req_io: specific completion callback. When not
281 * set,the default callback will be used
282 *
283 * This function allocates the test request and the block
284 * request and calls blk_rq_map_kern which allocates the
285 * required BIO. The allocated test request and the block
286 * request memory is freed at the end of the test and the
287 * allocated BIO memory is freed by end_test_bio.
288 */
Lee Susman1199b4c2012-12-19 14:19:30 +0200289struct test_request *test_iosched_create_test_req(int is_err_expcted,
Maya Erez60181552012-06-27 11:25:26 +0300290 int direction, int start_sec,
291 int num_bios, int pattern, rq_end_io_fn *end_req_io)
292{
Lee Susman1199b4c2012-12-19 14:19:30 +0200293 struct request *rq;
294 struct test_request *test_rq;
295 int rw_flags, buf_size;
296 int ret = 0, i;
Maya Erez60181552012-06-27 11:25:26 +0300297 unsigned int *bio_ptr = NULL;
298 struct bio *bio = NULL;
299
300 if (!ptd)
Lee Susman1199b4c2012-12-19 14:19:30 +0200301 return NULL;
Maya Erez60181552012-06-27 11:25:26 +0300302
303 rw_flags = direction;
304
305 rq = blk_get_request(ptd->req_q, rw_flags, GFP_KERNEL);
306 if (!rq) {
307 test_pr_err("%s: Failed to allocate a request", __func__);
Lee Susman1199b4c2012-12-19 14:19:30 +0200308 return NULL;
Maya Erez60181552012-06-27 11:25:26 +0300309 }
310
311 test_rq = kzalloc(sizeof(struct test_request), GFP_KERNEL);
312 if (!test_rq) {
313 test_pr_err("%s: Failed to allocate test request", __func__);
314 blk_put_request(rq);
Lee Susman1199b4c2012-12-19 14:19:30 +0200315 return NULL;
Maya Erez60181552012-06-27 11:25:26 +0300316 }
317
318 buf_size = sizeof(unsigned int) * BIO_U32_SIZE * num_bios;
319 test_rq->bios_buffer = kzalloc(buf_size, GFP_KERNEL);
320 if (!test_rq->bios_buffer) {
321 test_pr_err("%s: Failed to allocate the data buf", __func__);
322 goto err;
323 }
324 test_rq->buf_size = buf_size;
325
326 if (direction == WRITE)
327 fill_buf_with_pattern(test_rq->bios_buffer,
328 buf_size, pattern);
329 test_rq->wr_rd_data_pattern = pattern;
330
331 bio_ptr = test_rq->bios_buffer;
332 for (i = 0; i < num_bios; ++i) {
333 ret = blk_rq_map_kern(ptd->req_q, rq,
334 (void *)bio_ptr,
335 sizeof(unsigned int)*BIO_U32_SIZE,
336 GFP_KERNEL);
337 if (ret) {
338 test_pr_err("%s: blk_rq_map_kern returned error %d",
339 __func__, ret);
340 goto err;
341 }
342 bio_ptr += BIO_U32_SIZE;
343 }
344
345 if (end_req_io)
346 rq->end_io = end_req_io;
347 else
348 rq->end_io = end_test_req;
349 rq->__sector = start_sec;
350 rq->cmd_type |= REQ_TYPE_FS;
Lee Susman1199b4c2012-12-19 14:19:30 +0200351 rq->cmd_flags |= REQ_SORTED; /* do we need this?*/
Maya Erez60181552012-06-27 11:25:26 +0300352
353 if (rq->bio) {
354 rq->bio->bi_sector = start_sec;
355 rq->bio->bi_end_io = end_test_bio;
356 bio = rq->bio;
357 while ((bio = bio->bi_next) != NULL)
358 bio->bi_end_io = end_test_bio;
359 }
360
361 ptd->num_of_write_bios += num_bios;
362 test_rq->req_id = ptd->wr_rd_next_req_id++;
363
Maya Erezdb8cbfe2012-07-15 13:09:08 +0300364 test_rq->req_completed = false;
365 test_rq->req_result = -EINVAL;
Maya Erez60181552012-06-27 11:25:26 +0300366 test_rq->rq = rq;
Dolev Raviv50032382013-01-09 12:00:02 +0200367 if (ptd->test_info.get_rq_disk_fn)
368 test_rq->rq->rq_disk = ptd->test_info.get_rq_disk_fn();
Maya Erez60181552012-06-27 11:25:26 +0300369 test_rq->is_err_expected = is_err_expcted;
370 rq->elv.priv[0] = (void *)test_rq;
371
Lee Susman1199b4c2012-12-19 14:19:30 +0200372 test_pr_debug("%s: created test request %d, buf_size=%d",
373 __func__, test_rq->req_id, buf_size);
Maya Erez60181552012-06-27 11:25:26 +0300374
Lee Susman1199b4c2012-12-19 14:19:30 +0200375 return test_rq;
Maya Erez60181552012-06-27 11:25:26 +0300376err:
377 blk_put_request(rq);
378 kfree(test_rq->bios_buffer);
Lee Susman1199b4c2012-12-19 14:19:30 +0200379 return NULL;
380}
381EXPORT_SYMBOL(test_iosched_create_test_req);
382
383
384/**
385 * test_iosched_add_wr_rd_test_req() - Create and queue a
386 * read/write request.
387 * @is_err_expcted: A flag to indicate if this request
388 * should succeed or not
389 * @direction: READ/WRITE
390 * @start_sec: start address of the first bio
391 * @num_bios: number of BIOs to be allocated for the
392 * request
393 * @pattern: A pattern, to be written into the write
394 * requests data buffer. In case of READ
395 * request, the given pattern is kept as
396 * the expected pattern. The expected
397 * pattern will be compared in the test
398 * check result function. If no comparisson
399 * is required, set pattern to
400 * TEST_NO_PATTERN.
401 * @end_req_io: specific completion callback. When not
402 * set,the default callback will be used
403 *
404 * This function allocates the test request and the block
405 * request and calls blk_rq_map_kern which allocates the
406 * required BIO. Upon success the new request is added to the
407 * test_queue. The allocated test request and the block request
408 * memory is freed at the end of the test and the allocated BIO
409 * memory is freed by end_test_bio.
410 */
411int test_iosched_add_wr_rd_test_req(int is_err_expcted,
412 int direction, int start_sec,
413 int num_bios, int pattern, rq_end_io_fn *end_req_io)
414{
415 struct test_request *test_rq = NULL;
416
417 test_rq = test_iosched_create_test_req(is_err_expcted,
418 direction, start_sec,
419 num_bios, pattern, end_req_io);
420 if (test_rq) {
421 spin_lock_irq(ptd->req_q->queue_lock);
422 list_add_tail(&test_rq->queuelist, &ptd->test_queue);
423 ptd->test_count++;
424 spin_unlock_irq(ptd->req_q->queue_lock);
425 return 0;
426 }
Maya Erez60181552012-06-27 11:25:26 +0300427 return -ENODEV;
428}
429EXPORT_SYMBOL(test_iosched_add_wr_rd_test_req);
430
431/* Converts the testcase number into a string */
432static char *get_test_case_str(struct test_data *td)
433{
434 if (td->test_info.get_test_case_str_fn)
435 return td->test_info.get_test_case_str_fn(td);
436
437 return "Unknown testcase";
438}
439
440/*
441 * Verify that the test request data buffer includes the expected
442 * pattern
443 */
444static int compare_buffer_to_pattern(struct test_request *test_rq)
445{
446 int i = 0;
447 int num_of_dwords = test_rq->buf_size/sizeof(int);
448
449 /* num_bytes should be aligned to sizeof(int) */
450 BUG_ON((test_rq->buf_size % sizeof(int)) != 0);
451 BUG_ON(test_rq->bios_buffer == NULL);
452
453 if (test_rq->wr_rd_data_pattern == TEST_NO_PATTERN)
454 return 0;
455
456 if (test_rq->wr_rd_data_pattern == TEST_PATTERN_SEQUENTIAL) {
457 for (i = 0; i < num_of_dwords; i++) {
458 if (test_rq->bios_buffer[i] != i) {
459 test_pr_err(
460 "%s: wrong pattern 0x%x in index %d",
461 __func__, test_rq->bios_buffer[i], i);
462 return -EINVAL;
463 }
464 }
465 } else {
466 for (i = 0; i < num_of_dwords; i++) {
467 if (test_rq->bios_buffer[i] !=
468 test_rq->wr_rd_data_pattern) {
469 test_pr_err(
470 "%s: wrong pattern 0x%x in index %d",
471 __func__, test_rq->bios_buffer[i], i);
472 return -EINVAL;
473 }
474 }
475 }
476
477 return 0;
478}
479
/*
 * Determine if the test passed or failed.
 * The function checks each dispatched request's completion value
 * against its expectation, verifies read data patterns, and then
 * calls the testcase-specific check_test_result_fn (if any).
 *
 * NOTE(review): when ptd is NULL the err path runs with res == 0, so
 * the test is logged/marked FAILED yet 0 is returned -- confirm this
 * is intended.
 */
static int check_test_result(struct test_data *td)
{
	struct test_request *test_rq;
	int res = 0;
	static int run;	/* persistent run counter, bumped on pass and fail */

	if (!ptd)
		goto err;

	list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist) {
		/* A dispatched entry may carry no block request; skip it */
		if (!test_rq->rq) {
			test_pr_info("%s: req_id %d is contains empty req",
				__func__, test_rq->req_id);
			continue;
		}
		if (!test_rq->req_completed) {
			test_pr_err("%s: rq %d not completed", __func__,
				test_rq->req_id);
			res = -EINVAL;
			goto err;
		}

		/* Completion status must match the per-request expectation */
		if ((test_rq->req_result < 0) && !test_rq->is_err_expected) {
			test_pr_err(
				"%s: rq %d completed with err, not as expected",
				__func__, test_rq->req_id);
			res = -EINVAL;
			goto err;
		}
		if ((test_rq->req_result == 0) && test_rq->is_err_expected) {
			test_pr_err("%s: rq %d succeeded, not as expected",
				__func__, test_rq->req_id);
			res = -EINVAL;
			goto err;
		}
		/* Reads additionally verify the data against the pattern */
		if (rq_data_dir(test_rq->rq) == READ) {
			res = compare_buffer_to_pattern(test_rq);
			if (res) {
				test_pr_err("%s: read pattern not as expected",
					__func__);
				res = -EINVAL;
				goto err;
			}
		}
	}

	/* Testcase-specific verdict, on top of the generic checks above */
	if (td->test_info.check_test_result_fn) {
		res = td->test_info.check_test_result_fn(td);
		if (res)
			goto err;
	}

	test_pr_info("%s: %s, run# %03d, PASSED",
		__func__, get_test_case_str(td), ++run);
	td->test_result = TEST_PASSED;

	return 0;
err:
	test_pr_err("%s: %s, run# %03d, FAILED",
		__func__, get_test_case_str(td), ++run);
	td->test_result = TEST_FAILED;
	return res;
}
549
550/* Create and queue the required requests according to the test case */
551static int prepare_test(struct test_data *td)
552{
553 int ret = 0;
554
555 if (td->test_info.prepare_test_fn) {
556 ret = td->test_info.prepare_test_fn(td);
557 return ret;
558 }
559
560 return 0;
561}
562
563/* Run the test */
564static int run_test(struct test_data *td)
565{
566 int ret = 0;
567
568 if (td->test_info.run_test_fn) {
569 ret = td->test_info.run_test_fn(td);
570 return ret;
571 }
572
Maya Erez60181552012-06-27 11:25:26 +0300573 __blk_run_queue(td->req_q);
574
575 return 0;
576}
577
/*
 * free_test_queue() - Free all allocated test requests in the given test_queue:
 * free their requests and BIOs buffer
 * @test_queue	the test queue to be freed
 */
static void free_test_queue(struct list_head *test_queue)
{
	struct test_request *test_rq;
	struct bio *bio;

	while (!list_empty(test_queue)) {
		test_rq = list_entry(test_queue->next, struct test_request,
				queuelist);

		list_del_init(&test_rq->queuelist);
		/*
		 * If the request was not completed we need to free its BIOs
		 * and remove it from the packed list
		 */
		if (!test_rq->req_completed) {
			test_pr_info(
				"%s: Freeing memory of an uncompleted request",
				__func__);
			list_del_init(&test_rq->rq->queuelist);
			/* Drop each bio by hand; end_test_bio never ran */
			while ((bio = test_rq->rq->bio) != NULL) {
				test_rq->rq->bio = bio->bi_next;
				bio_put(bio);
			}
		}
		blk_put_request(test_rq->rq);
		kfree(test_rq->bios_buffer);
		kfree(test_rq);
	}
}
612
/*
 * free_test_requests() - Free all allocated test requests in
 * all test queues in given test_data.
 * @td	The test_data struct whose test requests will be
 *	freed.
 */
static void free_test_requests(struct test_data *td)
{
	if (!td)
		return;

	/* Each counter mirrors its queue; reset it after draining */
	if (td->urgent_count) {
		free_test_queue(&td->urgent_queue);
		td->urgent_count = 0;
	}
	if (td->test_count) {
		free_test_queue(&td->test_queue);
		td->test_count = 0;
	}
	if (td->dispatched_count) {
		free_test_queue(&td->dispatched_queue);
		td->dispatched_count = 0;
	}
	if (td->reinsert_count) {
		free_test_queue(&td->reinsert_queue);
		td->reinsert_count = 0;
	}
}
641
642/*
643 * post_test() - Do post test operations. Free the allocated
644 * test requests, their requests and BIOs buffer.
645 * @td The test_data struct for the test that has
646 * ended.
Maya Erez60181552012-06-27 11:25:26 +0300647 */
648static int post_test(struct test_data *td)
649{
650 int ret = 0;
651
652 if (td->test_info.post_test_fn)
653 ret = td->test_info.post_test_fn(td);
654
655 ptd->test_info.testcase = 0;
656 ptd->test_state = TEST_IDLE;
657
658 free_test_requests(td);
659
660 return ret;
661}
662
663/*
664 * The timer verifies that the test will be completed even if we don't get
665 * the completion callback for all the requests.
666 */
667static void test_timeout_handler(unsigned long data)
668{
669 struct test_data *td = (struct test_data *)data;
670
671 test_pr_info("%s: TIMEOUT timer expired", __func__);
672 td->test_state = TEST_COMPLETED;
673 wake_up(&td->wait_q);
674 return;
675}
676
677static unsigned int get_timeout_msec(struct test_data *td)
678{
679 if (td->test_info.timeout_msec)
680 return td->test_info.timeout_msec;
681 else
682 return TIMEOUT_TIMER_MS;
683}
684
/**
 * test_iosched_start_test() - Prepares and runs the test.
 * @t_info:	the current test testcase and callbacks
 *		functions
 *
 * The function also checks the test result upon test completion.
 * A round that was disturbed by FS write requests (ignore_round) is
 * retried, up to TEST_MAX_TESTCASE_ROUNDS times.
 *
 * Returns 0 when the test passed, -ENODEV/-EINVAL/-EBUSY on setup
 * errors, or the failing step's error code.
 */
int test_iosched_start_test(struct test_info *t_info)
{
	int ret = 0;
	unsigned timeout_msec;
	int counter = 0;	/* number of rounds attempted */
	char *test_name = NULL;

	if (!ptd)
		return -ENODEV;

	if (!t_info) {
		ptd->test_result = TEST_FAILED;
		return -EINVAL;
	}

	do {
		if (ptd->ignore_round)
			/*
			 * We ignored the last run due to FS write requests.
			 * Sleep to allow those requests to be issued
			 */
			msleep(2000);

		/* ptd->lock serializes test setup against concurrent starts */
		spin_lock(&ptd->lock);

		if (ptd->test_state != TEST_IDLE) {
			test_pr_info(
				"%s: Another test is running, try again later",
				__func__);
			spin_unlock(&ptd->lock);
			return -EBUSY;
		}

		/* start_sector is configured via debugfs; 0 means unset */
		if (ptd->start_sector == 0) {
			test_pr_err("%s: Invalid start sector", __func__);
			ptd->test_result = TEST_FAILED;
			spin_unlock(&ptd->lock);
			return -EINVAL;
		}

		memcpy(&ptd->test_info, t_info, sizeof(struct test_info));

		/* Reset per-round state */
		ptd->test_result = TEST_NO_RESULT;
		ptd->num_of_write_bios = 0;

		ptd->unique_next_req_id = UNIQUE_START_REQ_ID;
		ptd->wr_rd_next_req_id = WR_RD_START_REQ_ID;

		ptd->ignore_round = false;
		ptd->fs_wr_reqs_during_test = false;

		ptd->test_state = TEST_RUNNING;

		spin_unlock(&ptd->lock);

		/* Safety net: test_timeout_handler force-completes the test */
		timeout_msec = get_timeout_msec(ptd);
		mod_timer(&ptd->timeout_timer, jiffies +
			  msecs_to_jiffies(timeout_msec));

		if (ptd->test_info.get_test_case_str_fn)
			test_name = ptd->test_info.get_test_case_str_fn(ptd);
		else
			test_name = "Unknown testcase";
		test_pr_info("%s: Starting test %s", __func__, test_name);

		ret = prepare_test(ptd);
		if (ret) {
			test_pr_err("%s: failed to prepare the test\n",
				__func__);
			goto error;
		}

		/* Start timestamp; turned into a delta on completion */
		ptd->test_info.test_duration = jiffies;
		ret = run_test(ptd);
		if (ret) {
			test_pr_err("%s: failed to run the test\n", __func__);
			goto error;
		}

		test_pr_info("%s: Waiting for the test completion", __func__);

		/* Woken by check_test_completion() or the timeout timer */
		wait_event(ptd->wait_q, ptd->test_state == TEST_COMPLETED);
		t_info->test_duration = ptd->test_info.test_duration;
		del_timer_sync(&ptd->timeout_timer);

		ret = check_test_result(ptd);
		if (ret) {
			test_pr_err("%s: check_test_result failed\n",
				__func__);
			goto error;
		}

		ret = post_test(ptd);
		if (ret) {
			test_pr_err("%s: post_test failed\n", __func__);
			goto error;
		}

		/*
		 * Wakeup the queue thread to fetch FS requests that might got
		 * postponed due to the test
		 */
		__blk_run_queue(ptd->req_q);

		if (ptd->ignore_round)
			test_pr_info(
			"%s: Round canceled (Got wr reqs in the middle)",
			__func__);

		if (++counter == TEST_MAX_TESTCASE_ROUNDS) {
			test_pr_info("%s: Too many rounds, did not succeed...",
			     __func__);
			ptd->test_result = TEST_FAILED;
		}

	} while ((ptd->ignore_round) && (counter < TEST_MAX_TESTCASE_ROUNDS));

	if (ptd->test_result == TEST_PASSED)
		return 0;
	else
		return -EINVAL;

error:
	/* Best-effort cleanup; the round's verdict is forced to FAILED */
	post_test(ptd);
	ptd->test_result = TEST_FAILED;
	return ret;
}
EXPORT_SYMBOL(test_iosched_start_test);
820
/**
 * test_iosched_register() - register a block device test
 * utility.
 * @bdt:	the block device test type to register
 *
 * Adds @bdt to the global registry under blk_dev_test_list_lock.
 */
void test_iosched_register(struct blk_dev_test_type *bdt)
{
	spin_lock(&blk_dev_test_list_lock);
	list_add_tail(&bdt->list, &blk_dev_test_list);
	spin_unlock(&blk_dev_test_list_lock);
}
EXPORT_SYMBOL_GPL(test_iosched_register);
833
/**
 * test_iosched_unregister() - unregister a block device test
 * utility.
 * @bdt:	the block device test type to unregister
 *
 * Removes @bdt from the global registry under blk_dev_test_list_lock.
 */
void test_iosched_unregister(struct blk_dev_test_type *bdt)
{
	spin_lock(&blk_dev_test_list_lock);
	list_del_init(&bdt->list);
	spin_unlock(&blk_dev_test_list_lock);
}
EXPORT_SYMBOL_GPL(test_iosched_unregister);
846
847/**
848 * test_iosched_set_test_result() - Set the test
849 * result(PASS/FAIL)
850 * @test_result: the test result
851 */
852void test_iosched_set_test_result(int test_result)
853{
854 if (!ptd)
855 return;
856
857 ptd->test_result = test_result;
858}
859EXPORT_SYMBOL(test_iosched_set_test_result);
860
861
862/**
863 * test_iosched_set_ignore_round() - Set the ignore_round flag
864 * @ignore_round: A flag to indicate if this test round
865 * should be ignored and re-run
866 */
867void test_iosched_set_ignore_round(bool ignore_round)
868{
869 if (!ptd)
870 return;
871
872 ptd->ignore_round = ignore_round;
873}
874EXPORT_SYMBOL(test_iosched_set_ignore_round);
875
876/**
877 * test_iosched_get_debugfs_tests_root() - returns the root
878 * debugfs directory for the test_iosched tests
879 */
880struct dentry *test_iosched_get_debugfs_tests_root(void)
881{
882 if (!ptd)
883 return NULL;
884
885 return ptd->debug.debug_tests_root;
886}
887EXPORT_SYMBOL(test_iosched_get_debugfs_tests_root);
888
889/**
890 * test_iosched_get_debugfs_utils_root() - returns the root
891 * debugfs directory for the test_iosched utils
892 */
893struct dentry *test_iosched_get_debugfs_utils_root(void)
894{
895 if (!ptd)
896 return NULL;
897
898 return ptd->debug.debug_utils_root;
899}
900EXPORT_SYMBOL(test_iosched_get_debugfs_utils_root);
901
/*
 * Build the debugfs hierarchy:
 *   test-iosched/
 *     tests/        - per-testcase trigger files (populated by tests)
 *     utils/        - test_result and start_sector knobs
 * Returns 0 on success, -ENOENT on any creation failure (partially
 * created entries are removed before returning).
 */
static int test_debugfs_init(struct test_data *td)
{
	td->debug.debug_root = debugfs_create_dir("test-iosched", NULL);
	if (!td->debug.debug_root)
		return -ENOENT;

	td->debug.debug_tests_root = debugfs_create_dir("tests",
							td->debug.debug_root);
	if (!td->debug.debug_tests_root)
		goto err;

	td->debug.debug_utils_root = debugfs_create_dir("utils",
							td->debug.debug_root);
	if (!td->debug.debug_utils_root)
		goto err;

	/* NOTE(review): S_IWUGO makes these world-writable - intended? */
	td->debug.debug_test_result = debugfs_create_u32(
					"test_result",
					S_IRUGO | S_IWUGO,
					td->debug.debug_utils_root,
					&td->test_result);
	if (!td->debug.debug_test_result)
		goto err;

	td->debug.start_sector = debugfs_create_u32(
					"start_sector",
					S_IRUGO | S_IWUGO,
					td->debug.debug_utils_root,
					&td->start_sector);
	if (!td->debug.start_sector)
		goto err;

	return 0;

err:
	/* Tear down whatever part of the tree was already created */
	debugfs_remove_recursive(td->debug.debug_root);
	return -ENOENT;
}
940
/* Remove the whole test-iosched debugfs hierarchy */
static void test_debugfs_cleanup(struct test_data *td)
{
	debugfs_remove_recursive(td->debug.debug_root);
}
945
946static void print_req(struct request *req)
947{
948 struct bio *bio;
949 struct test_request *test_rq;
950
951 if (!req)
952 return;
953
954 test_rq = (struct test_request *)req->elv.priv[0];
955
956 if (test_rq) {
957 test_pr_debug("%s: Dispatch request %d: __sector=0x%lx",
958 __func__, test_rq->req_id, (unsigned long)req->__sector);
959 test_pr_debug("%s: nr_phys_segments=%d, num_of_sectors=%d",
960 __func__, req->nr_phys_segments, blk_rq_sectors(req));
961 bio = req->bio;
962 test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
963 __func__, bio->bi_size,
964 (unsigned long)bio->bi_sector);
965 while ((bio = bio->bi_next) != NULL) {
966 test_pr_debug("%s: bio: bi_size=%d, bi_sector=0x%lx",
967 __func__, bio->bi_size,
968 (unsigned long)bio->bi_sector);
969 }
970 }
971}
972
/*
 * Elevator merge hook: @next has been merged into @rq, so drop @next
 * from our pending list (presumably released by the block layer
 * afterwards -- confirm against this kernel's elevator_merge_req_fn
 * contract).
 */
static void test_merged_requests(struct request_queue *q,
			 struct request *rq, struct request *next)
{
	list_del_init(&next->queuelist);
}
/*
 * test_dispatch_from(): Dispatch request from @queue to the @dispatched_queue.
 * Also update the dispatched_count counter.
 *
 * Returns 1 if a request was dispatched, 0 when @queue is empty or on
 * error.  @count is the caller's counter for @queue and is decremented
 * under ptd->lock together with the list move.
 */
static int test_dispatch_from(struct request_queue *q,
		struct list_head *queue, unsigned int *count)
{
	struct test_request *test_rq;
	struct request *rq;
	int ret = 0;

	if (!ptd)
		goto err;

	spin_lock_irq(&ptd->lock);
	if (!list_empty(queue)) {
		test_rq = list_entry(queue->next, struct test_request,
				queuelist);
		rq = test_rq->rq;
		if (!rq) {
			pr_err("%s: null request,return", __func__);
			spin_unlock_irq(&ptd->lock);
			goto err;
		}
		/* Book-keep first, then drop the lock before dispatching */
		list_move_tail(&test_rq->queuelist, &ptd->dispatched_queue);
		ptd->dispatched_count++;
		(*count)--;
		spin_unlock_irq(&ptd->lock);

		print_req(rq);
		elv_dispatch_sort(q, rq);
		ret = 1;
		goto err;
	}
	spin_unlock_irq(&ptd->lock);

err:
	return ret;
}
Maya Erez60181552012-06-27 11:25:26 +03001017
/*
 * Dispatch a test request in case there is a running test. Otherwise,
 * dispatch a request that was queued by the FS to keep the card
 * functional.
 *
 * Returns 1 when a request was handed to the block layer, 0 otherwise.
 */
static int test_dispatch_requests(struct request_queue *q, int force)
{
	struct test_data *td = q->elevator->elevator_data;
	struct request *rq = NULL;
	int ret = 0;

	switch (td->test_state) {
	case TEST_IDLE:
		/* No test running: serve regular FS requests from td->queue */
		if (!list_empty(&td->queue)) {
			rq = list_entry(td->queue.next, struct request,
				queuelist);
			list_del_init(&rq->queuelist);
			elv_dispatch_sort(q, rq);
			ret = 1;
			goto exit;
		}
		break;
	case TEST_RUNNING:
		/* Priority order: urgent, then reinserted, then new test reqs */
		if (test_dispatch_from(q, &td->urgent_queue,
				       &td->urgent_count)) {
			test_pr_debug("%s: Dispatched from urgent_count=%d",
					__func__, ptd->urgent_count);
			ret = 1;
			goto exit;
		}
		if (test_dispatch_from(q, &td->reinsert_queue,
				       &td->reinsert_count)) {
			test_pr_debug("%s: Dispatched from reinsert_count=%d",
					__func__, ptd->reinsert_count);
			ret = 1;
			goto exit;
		}
		if (test_dispatch_from(q, &td->test_queue, &td->test_count)) {
			test_pr_debug("%s: Dispatched from test_count=%d",
					__func__, ptd->test_count);
			ret = 1;
			goto exit;
		}
		break;
	case TEST_COMPLETED:
	default:
		/* Nothing is dispatched while results are being collected */
		break;
	}

exit:
	return ret;
}
1069
1070static void test_add_request(struct request_queue *q, struct request *rq)
1071{
1072 struct test_data *td = q->elevator->elevator_data;
1073
1074 list_add_tail(&rq->queuelist, &td->queue);
1075
1076 /*
1077 * The write requests can be followed by a FLUSH request that might
1078 * cause unexpected results of the test.
1079 */
1080 if ((rq_data_dir(rq) == WRITE) && (td->test_state == TEST_RUNNING)) {
1081 test_pr_debug("%s: got WRITE req in the middle of the test",
1082 __func__);
1083 td->fs_wr_reqs_during_test = true;
1084 }
1085}
1086
1087static struct request *
1088test_former_request(struct request_queue *q, struct request *rq)
1089{
1090 struct test_data *td = q->elevator->elevator_data;
1091
1092 if (rq->queuelist.prev == &td->queue)
1093 return NULL;
1094 return list_entry(rq->queuelist.prev, struct request, queuelist);
1095}
1096
1097static struct request *
1098test_latter_request(struct request_queue *q, struct request *rq)
1099{
1100 struct test_data *td = q->elevator->elevator_data;
1101
1102 if (rq->queuelist.next == &td->queue)
1103 return NULL;
1104 return list_entry(rq->queuelist.next, struct request, queuelist);
1105}
1106
1107static void *test_init_queue(struct request_queue *q)
1108{
1109 struct blk_dev_test_type *__bdt;
1110
1111 ptd = kmalloc_node(sizeof(struct test_data), GFP_KERNEL,
1112 q->node);
1113 if (!ptd) {
1114 test_pr_err("%s: failed to allocate test data", __func__);
1115 return NULL;
1116 }
1117 memset((void *)ptd, 0, sizeof(struct test_data));
1118 INIT_LIST_HEAD(&ptd->queue);
1119 INIT_LIST_HEAD(&ptd->test_queue);
Lee Susman1199b4c2012-12-19 14:19:30 +02001120 INIT_LIST_HEAD(&ptd->dispatched_queue);
1121 INIT_LIST_HEAD(&ptd->reinsert_queue);
1122 INIT_LIST_HEAD(&ptd->urgent_queue);
Maya Erez60181552012-06-27 11:25:26 +03001123 init_waitqueue_head(&ptd->wait_q);
1124 ptd->req_q = q;
1125
1126 setup_timer(&ptd->timeout_timer, test_timeout_handler,
1127 (unsigned long)ptd);
1128
1129 spin_lock_init(&ptd->lock);
1130
1131 if (test_debugfs_init(ptd)) {
1132 test_pr_err("%s: Failed to create debugfs files", __func__);
1133 return NULL;
1134 }
1135
1136 list_for_each_entry(__bdt, &blk_dev_test_list, list)
1137 __bdt->init_fn();
1138
1139 return ptd;
1140}
1141
1142static void test_exit_queue(struct elevator_queue *e)
1143{
1144 struct test_data *td = e->elevator_data;
1145 struct blk_dev_test_type *__bdt;
1146
1147 BUG_ON(!list_empty(&td->queue));
1148
1149 list_for_each_entry(__bdt, &blk_dev_test_list, list)
1150 __bdt->exit_fn();
1151
1152 test_debugfs_cleanup(td);
1153
1154 kfree(td);
1155}
1156
/**
 * test_get_test_data() - Returns a pointer to the test_data
 * struct which keeps the current test data.
 *
 * May return NULL before the test-iosched elevator instance has been
 * initialized (ptd starts out NULL).
 */
struct test_data *test_get_test_data(void)
{
	return ptd;
}
EXPORT_SYMBOL(test_get_test_data);
1167
1168static bool test_urgent_pending(struct request_queue *q)
1169{
1170 return !list_empty(&ptd->urgent_queue);
1171}
1172
1173/**
1174 * test_iosched_add_urgent_req() - Add an urgent test_request.
1175 * First mark the request as urgent, then add it to the
1176 * urgent_queue test queue.
1177 * @test_rq: pointer to the urgent test_request to be
1178 * added.
1179 *
1180 */
1181void test_iosched_add_urgent_req(struct test_request *test_rq)
1182{
1183 spin_lock_irq(&ptd->lock);
1184 blk_mark_rq_urgent(test_rq->rq);
1185 list_add_tail(&test_rq->queuelist, &ptd->urgent_queue);
1186 ptd->urgent_count++;
1187 spin_unlock_irq(&ptd->lock);
1188}
1189EXPORT_SYMBOL(test_iosched_add_urgent_req);
1190
1191/**
1192 * test_reinsert_req() - Moves the @rq request from
1193 * @dispatched_queue into @reinsert_queue.
1194 * The @rq must be in @dispatched_queue
1195 * @q: request queue
1196 * @rq: request to be inserted
1197 *
1198 *
1199 */
1200static int test_reinsert_req(struct request_queue *q,
1201 struct request *rq)
1202{
1203 struct test_request *test_rq;
1204 int ret = -EINVAL;
1205
1206 if (!ptd)
1207 goto exit;
1208
1209 if (list_empty(&ptd->dispatched_queue)) {
1210 test_pr_err("%s: dispatched_queue is empty", __func__);
1211 goto exit;
1212 }
1213
1214 list_for_each_entry(test_rq, &ptd->dispatched_queue, queuelist) {
1215 if (test_rq->rq == rq) {
1216 list_move(&test_rq->queuelist, &ptd->reinsert_queue);
1217 ptd->dispatched_count--;
1218 ptd->reinsert_count++;
1219 ret = 0;
1220 break;
1221 }
1222 }
1223
1224exit:
1225 return ret;
1226}
1227
/* Elevator descriptor wiring the test hooks into the block layer. */
static struct elevator_type elevator_test_iosched = {

	.ops = {
		.elevator_merge_req_fn = test_merged_requests,
		.elevator_dispatch_fn = test_dispatch_requests,
		.elevator_add_req_fn = test_add_request,
		.elevator_former_req_fn = test_former_request,
		.elevator_latter_req_fn = test_latter_request,
		.elevator_init_fn = test_init_queue,
		.elevator_exit_fn = test_exit_queue,
		.elevator_is_urgent_fn = test_urgent_pending,
		.elevator_reinsert_req_fn = test_reinsert_req,
	},
	.elevator_name = "test-iosched",
	.elevator_owner = THIS_MODULE,
};
1244
1245static int __init test_init(void)
1246{
1247 elv_register(&elevator_test_iosched);
1248
1249 return 0;
1250}
1251
/* Module exit point: unregister the elevator from the block layer. */
static void __exit test_exit(void)
{
	elv_unregister(&elevator_test_iosched);
}
1256
/* Standard module registration and metadata. */
module_init(test_init);
module_exit(test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Test IO scheduler");