/*
 * sync/psync/vsync engine
 *
 * IO engine that does regular read(2)/write(2) with lseek(2) to transfer
 * data, IO engine that does regular pread(2)/pwrite(2) to transfer data,
 * and IO engine that batches contiguous IOs into readv(2)/writev(2) calls.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>
#include <errno.h>
#include <assert.h>

#include "../fio.h"
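
/*
 * Per-thread state for the vsync engine: the iovec batch currently being
 * built and the bookkeeping used to decide whether the next io_u can be
 * appended to it.
 */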
struct syncio_data {
        struct iovec *iovecs;
        struct io_u **io_us;
        unsigned int queued;
        unsigned long queued_bytes;

        unsigned long long last_offset;
        struct fio_file *last_file;
        enum fio_ddir last_ddir;
};
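
/*
 * The sync engine issues plain read(2)/write(2), which use the file
 * position, so seek to the IO offset here. The seek is skipped for sync
 * ops and when the previous IO already left the file at this offset.
 */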
static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;

        if (io_u->ddir == DDIR_SYNC)
                return 0;
        if (io_u->offset == f->last_completed_pos)
                return 0;

        if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
                td_verror(td, errno, "lseek");
                return 1;
        }

        return 0;
}
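
/*
 * Translate the return value of a read/write/fsync call into fio's
 * completion status: a short transfer becomes a residual, a negative
 * return records errno. Sync engines always complete inline.
 */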
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
        if (ret != (int) io_u->xfer_buflen) {
                if (ret >= 0) {
                        io_u->resid = io_u->xfer_buflen - ret;
                        io_u->error = 0;
                        return FIO_Q_COMPLETED;
                } else
                        io_u->error = errno;
        }

        if (io_u->error)
                td_verror(td, io_u->error, "xfer");

        return FIO_Q_COMPLETED;
}
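
/*
 * psync engine: pread(2)/pwrite(2) take the offset directly, so no
 * ->prep() seek is needed.
 */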
static int fio_psyncio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        int ret;

        fio_ro_check(td, io_u);

        if (io_u->ddir == DDIR_READ)
                ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
        else if (io_u->ddir == DDIR_WRITE)
                ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
        else
                ret = fsync(f->fd);

        return fio_io_end(td, io_u, ret);
}
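
/*
 * sync engine: read(2)/write(2) at the current file position, which
 * fio_syncio_prep() has already set.
 */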
static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        int ret;

        fio_ro_check(td, io_u);

        if (io_u->ddir == DDIR_READ)
                ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
        else if (io_u->ddir == DDIR_WRITE)
                ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
        else
                ret = fsync(f->fd);

        return fio_io_end(td, io_u, ret);
}
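
/*
 * All queued IO is completed inline by ->commit(), so event reaping just
 * reports (and clears) the size of the last batch.
 */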
static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
                                 unsigned int max,
                                 struct timespec fio_unused *t)
{
        struct syncio_data *sd = td->io_ops->data;
        int ret;

        if (min) {
                ret = sd->queued;
                sd->queued = 0;
        } else
                ret = 0;

        dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
        return ret;
}

static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
        struct syncio_data *sd = td->io_ops->data;

        return sd->io_us[event];
}
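
/*
 * An io_u can join the current batch only if it targets the same file
 * and direction and starts exactly where the previous IO ended.
 */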
static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
        struct syncio_data *sd = td->io_ops->data;

        if (io_u->ddir == DDIR_SYNC)
                return 0;

        if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
            io_u->ddir == sd->last_ddir)
                return 1;

        return 0;
}
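
/*
 * Place the io_u in slot 'index' of the batch, fill the matching iovec,
 * and update the bookkeeping that fio_vsyncio_append() checks.
 */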
static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
                                int index)
{
        sd->io_us[index] = io_u;
        sd->iovecs[index].iov_base = io_u->xfer_buf;
        sd->iovecs[index].iov_len = io_u->xfer_buflen;
        sd->last_offset = io_u->offset + io_u->xfer_buflen;
        sd->last_file = io_u->file;
        sd->last_ddir = io_u->ddir;
        sd->queued_bytes += io_u->xfer_buflen;
        sd->queued++;
}
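
/*
 * vsync queue path: contiguous IOs are gathered into an iovec batch.
 * Anything that cannot be appended makes fio commit the pending batch
 * first (FIO_Q_BUSY); a sync op with nothing queued is handled inline.
 */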
static int fio_vsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct syncio_data *sd = td->io_ops->data;

        fio_ro_check(td, io_u);

        if (!fio_vsyncio_append(td, io_u)) {
                dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
                /*
                 * If we can't append and have stuff queued, tell fio to
                 * commit those first and then retry this io
                 */
                if (sd->queued)
                        return FIO_Q_BUSY;
                if (io_u->ddir == DDIR_SYNC) {
                        int ret = fsync(io_u->file->fd);

                        return fio_io_end(td, io_u, ret);
                }

                sd->queued = 0;
                sd->queued_bytes = 0;
                fio_vsyncio_set_iov(sd, io_u, 0);
        } else {
                if (sd->queued == td->o.iodepth) {
                        dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
                        return FIO_Q_BUSY;
                }

                dprint(FD_IO, "vsyncio_queue: append\n");
                fio_vsyncio_set_iov(sd, io_u, sd->queued);
        }

        dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
        return FIO_Q_QUEUED;
}

/*
 * Check that we transferred all bytes, or saw an error, etc
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
        struct syncio_data *sd = td->io_ops->data;
        struct io_u *io_u;
        unsigned int i;
        int err;

        /*
         * transferred everything, perfect
         */
        if (bytes == sd->queued_bytes)
                return 0;

        err = errno;
        for (i = 0; i < sd->queued; i++) {
                io_u = sd->io_us[i];

                if (bytes == -1) {
                        io_u->error = err;
                } else {
                        unsigned int this_io;

                        this_io = bytes;
                        if (this_io > io_u->xfer_buflen)
                                this_io = io_u->xfer_buflen;

                        io_u->resid = io_u->xfer_buflen - this_io;
                        io_u->error = 0;
                        bytes -= this_io;
                }
        }

        if (bytes == -1) {
                td_verror(td, err, "xfer vsync");
                return -err;
        }

        return 0;
}
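
/*
 * Issue the batch: seek to the offset of its first io_u and transfer all
 * queued iovecs with a single readv(2)/writev(2).
 */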
static int fio_vsyncio_commit(struct thread_data *td)
{
        struct syncio_data *sd = td->io_ops->data;
        struct fio_file *f;
        ssize_t ret;

        if (!sd->queued)
                return 0;

        f = sd->last_file;

        if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
                int err = -errno;

                td_verror(td, errno, "lseek");
                return err;
        }

        if (sd->last_ddir == DDIR_READ)
                ret = readv(f->fd, sd->iovecs, sd->queued);
        else
                ret = writev(f->fd, sd->iovecs, sd->queued);

        dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
        return fio_vsyncio_end(td, ret);
}
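
/*
 * Allocate per-thread batch state; the iovec and io_u arrays are sized to
 * the job's queue depth.
 */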
static int fio_vsyncio_init(struct thread_data *td)
{
        struct syncio_data *sd;

        sd = malloc(sizeof(*sd));
        memset(sd, 0, sizeof(*sd));
        sd->last_offset = -1ULL;
        sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
        sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));

        td->io_ops->data = sd;
        return 0;
}

static void fio_vsyncio_cleanup(struct thread_data *td)
{
        struct syncio_data *sd = td->io_ops->data;

        free(sd->iovecs);
        free(sd->io_us);
        free(sd);
        td->io_ops->data = NULL;
}

static struct ioengine_ops ioengine_rw = {
        .name           = "sync",
        .version        = FIO_IOOPS_VERSION,
        .prep           = fio_syncio_prep,
        .queue          = fio_syncio_queue,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .flags          = FIO_SYNCIO,
};

static struct ioengine_ops ioengine_prw = {
        .name           = "psync",
        .version        = FIO_IOOPS_VERSION,
        .queue          = fio_psyncio_queue,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .flags          = FIO_SYNCIO,
};

static struct ioengine_ops ioengine_vrw = {
        .name           = "vsync",
        .version        = FIO_IOOPS_VERSION,
        .init           = fio_vsyncio_init,
        .cleanup        = fio_vsyncio_cleanup,
        .queue          = fio_vsyncio_queue,
        .commit         = fio_vsyncio_commit,
        .event          = fio_vsyncio_event,
        .getevents      = fio_vsyncio_getevents,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .flags          = FIO_SYNCIO,
};

static void fio_init fio_syncio_register(void)
{
        register_ioengine(&ioengine_rw);
        register_ioengine(&ioengine_prw);
        register_ioengine(&ioengine_vrw);
}

static void fio_exit fio_syncio_unregister(void)
{
        unregister_ioengine(&ioengine_rw);
        unregister_ioengine(&ioengine_prw);
        unregister_ioengine(&ioengine_vrw);
}