/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>

#include "list.h"
#include "fio.h"
#include "blktrace_api.h"

Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 11 | /* |
| 12 | * Just discard the pdu by seeking past it. |
| 13 | */ |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 14 | static int discard_pdu(int fd, struct blk_io_trace *t) |
| 15 | { |
| 16 | if (t->pdu_len == 0) |
| 17 | return 0; |
| 18 | |
| 19 | if (lseek(fd, t->pdu_len, SEEK_CUR) < 0) |
| 20 | return errno; |
| 21 | |
| 22 | return 0; |
| 23 | } |
| 24 | |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 25 | /* |
| 26 | * Check if this is a blktrace binary data file. We read a single trace |
| 27 | * into memory and check for the magic signature. |
| 28 | */ |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 29 | int is_blktrace(const char *filename) |
| 30 | { |
| 31 | struct blk_io_trace t; |
| 32 | int fd, ret; |
| 33 | |
| 34 | fd = open(filename, O_RDONLY); |
| 35 | if (fd < 0) { |
| 36 | perror("open blktrace"); |
| 37 | return 0; |
| 38 | } |
| 39 | |
| 40 | ret = read(fd, &t, sizeof(t)); |
| 41 | close(fd); |
| 42 | |
| 43 | if (ret < 0) { |
| 44 | perror("read blktrace"); |
| 45 | return 0; |
| 46 | } else if (ret != sizeof(t)) { |
| 47 | log_err("fio: short read on blktrace file\n"); |
| 48 | return 0; |
| 49 | } |
| 50 | |
| 51 | if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) |
| 52 | return 1; |
| 53 | |
| 54 | return 0; |
| 55 | } |
| 56 | |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 57 | /* |
| 58 | * Store blk_io_trace data in an ipo for later retrieval. |
| 59 | */ |
Jens Axboe | fdefd98 | 2007-05-15 10:12:26 +0200 | [diff] [blame] | 60 | static void store_ipo(struct thread_data *td, unsigned long long offset, |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 61 | unsigned int bytes, int rw, unsigned long long ttime) |
Jens Axboe | fdefd98 | 2007-05-15 10:12:26 +0200 | [diff] [blame] | 62 | { |
| 63 | struct io_piece *ipo = malloc(sizeof(*ipo)); |
| 64 | |
| 65 | memset(ipo, 0, sizeof(*ipo)); |
| 66 | INIT_LIST_HEAD(&ipo->list); |
| 67 | ipo->offset = offset; |
| 68 | ipo->len = bytes; |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 69 | ipo->delay = ttime / 1000; |
Jens Axboe | fdefd98 | 2007-05-15 10:12:26 +0200 | [diff] [blame] | 70 | if (rw) |
| 71 | ipo->ddir = DDIR_WRITE; |
| 72 | else |
| 73 | ipo->ddir = DDIR_READ; |
| 74 | |
| 75 | list_add_tail(&ipo->list, &td->io_log_list); |
| 76 | } |
| 77 | |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 78 | /* |
| 79 | * We only care for queue traces, most of the others are side effects |
| 80 | * due to internal workings of the block layer. |
| 81 | */ |
| 82 | static void handle_trace(struct thread_data *td, struct blk_io_trace *t, |
| 83 | unsigned long long ttime, unsigned long *ios) |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 84 | { |
Jens Axboe | fdefd98 | 2007-05-15 10:12:26 +0200 | [diff] [blame] | 85 | int rw; |
| 86 | |
| 87 | if ((t->action & 0xffff) != __BLK_TA_QUEUE) |
| 88 | return; |
| 89 | |
Jens Axboe | e7a7d70 | 2007-05-15 10:13:04 +0200 | [diff] [blame] | 90 | rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0; |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 91 | ios[rw]++; |
| 92 | store_ipo(td, t->sector, t->bytes, rw, ttime); |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 93 | } |
| 94 | |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 95 | /* |
| 96 | * Load a blktrace file by reading all the blk_io_trace entries, and storing |
| 97 | * them as io_pieces like the fio text version would do. |
| 98 | */ |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 99 | int load_blktrace(struct thread_data *td, const char *filename) |
| 100 | { |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 101 | unsigned long long ttime; |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 102 | struct blk_io_trace t; |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 103 | unsigned long ios[2]; |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 104 | int fd; |
| 105 | |
| 106 | fd = open(filename, O_RDONLY); |
| 107 | if (fd < 0) { |
| 108 | td_verror(td, errno, "open blktrace file"); |
| 109 | return 1; |
| 110 | } |
| 111 | |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 112 | ios[0] = ios[1] = 0; |
| 113 | ttime = 0; |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 114 | do { |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 115 | /* |
| 116 | * Once this is working fully, I'll add a layer between |
| 117 | * here and read to cache trace data. Then we can avoid |
| 118 | * doing itsy bitsy reads, but instead pull in a larger |
| 119 | * chunk of data at the time. |
| 120 | */ |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 121 | int ret = read(fd, &t, sizeof(t)); |
| 122 | |
| 123 | if (ret < 0) { |
| 124 | td_verror(td, errno, "read blktrace file"); |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 125 | goto err; |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 126 | } else if (!ret) { |
| 127 | break; |
| 128 | } else if (ret != sizeof(t)) { |
| 129 | log_err("fio: short read on blktrace file\n"); |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 130 | goto err; |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 131 | } |
| 132 | |
| 133 | if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) { |
| 134 | log_err("fio: bad magic in blktrace data\n"); |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 135 | goto err; |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 136 | } |
| 137 | if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) { |
| 138 | log_err("fio: bad blktrace version %d\n", t.magic & 0xff); |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 139 | goto err; |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 140 | } |
| 141 | ret = discard_pdu(fd, &t); |
| 142 | if (ret) { |
| 143 | td_verror(td, ret, "blktrace lseek"); |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 144 | goto err; |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 145 | } |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 146 | if (!ttime) |
| 147 | ttime = t.time; |
| 148 | handle_trace(td, &t, t.time - ttime, ios); |
| 149 | ttime = t.time; |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 150 | } while (1); |
| 151 | |
| 152 | close(fd); |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 153 | |
| 154 | if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) { |
| 155 | log_err("fio: found no ios in blktrace data\n"); |
| 156 | return 1; |
| 157 | } else if (ios[DDIR_READ] && !ios[DDIR_READ]) |
| 158 | td->o.td_ddir = TD_DDIR_READ; |
| 159 | else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) |
| 160 | td->o.td_ddir = TD_DDIR_WRITE; |
| 161 | else |
| 162 | td->o.td_ddir = TD_DDIR_RW; |
| 163 | |
| 164 | /* |
| 165 | * We need to do direct/raw ios to the device, to avoid getting |
| 166 | * read-ahead in our way. |
| 167 | */ |
| 168 | td->o.odirect = 1; |
| 169 | |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 170 | return 0; |
Jens Axboe | 8c1fdf0 | 2007-05-15 11:54:21 +0200 | [diff] [blame^] | 171 | err: |
| 172 | close(fd); |
| 173 | return 1; |
Jens Axboe | fb7b71a | 2007-05-15 08:44:04 +0200 | [diff] [blame] | 174 | } |