/*
 * sync/psync/vsync engine
 *
 * IO engine that does regular read(2)/write(2) with lseek(2) to transfer
 * data, regular pread(2)/pwrite(2) to transfer data, or vectored
 * readv(2)/writev(2) to transfer batches of contiguous data.
 */
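/*
 * Usage sketch (illustrative; not part of the original file): these engines
 * are selected with fio's "ioengine" job option, e.g. ioengine=sync, psync
 * or vsync. A minimal job-file snippet using standard fio options, with a
 * placeholder filename:
 *
 *   [example]
 *   ioengine=psync
 *   rw=randread
 *   bs=4k
 *   size=128m
 *   filename=/tmp/fio.test
 */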
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>

#include "../fio.h"

/*
 * Private data for the vectored "vsync" engine: batches contiguous io_u's
 * into an iovec array that is submitted with a single readv(2)/writev(2).
 */
struct syncio_data {
        struct iovec *iovecs;
        struct io_u **io_us;
        unsigned int queued;
        unsigned long queued_bytes;

        unsigned long long last_offset;
        struct fio_file *last_file;
        enum fio_ddir last_ddir;
};

/*
 * The plain "sync" engine uses read(2)/write(2), so position the file
 * descriptor with lseek(2) first, unless it is already at the right offset.
 */
static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;

        if (io_u->ddir == DDIR_SYNC)
                return 0;
        if (io_u->offset == f->last_completed_pos)
                return 0;

        if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
                td_verror(td, errno, "lseek");
                return 1;
        }

        return 0;
}

/*
 * Map the return value of read/write/fsync to io_u state: a short transfer
 * becomes a residual, a negative return records errno as the error.
 */
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
        if (ret != (int) io_u->xfer_buflen) {
                if (ret >= 0) {
                        io_u->resid = io_u->xfer_buflen - ret;
                        io_u->error = 0;
                        return FIO_Q_COMPLETED;
                } else
                        io_u->error = errno;
        }

        if (io_u->error)
                td_verror(td, io_u->error, "xfer");

        return FIO_Q_COMPLETED;
}

static int fio_psyncio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        int ret;

        fio_ro_check(td, io_u);

        if (io_u->ddir == DDIR_READ)
                ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
        else if (io_u->ddir == DDIR_WRITE)
                ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
        else
                ret = fsync(f->fd);

        return fio_io_end(td, io_u, ret);
}

static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        int ret;

        fio_ro_check(td, io_u);

        if (io_u->ddir == DDIR_READ)
                ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
        else if (io_u->ddir == DDIR_WRITE)
                ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
        else
                ret = fsync(f->fd);

        return fio_io_end(td, io_u, ret);
}

/*
 * The batched requests complete synchronously in fio_vsyncio_commit(), so
 * this only reports (and clears) whatever is currently queued.
 */
static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
                                 unsigned int max,
                                 struct timespec fio_unused *t)
{
        struct syncio_data *sd = td->io_ops->data;
        int ret;

        if (min) {
                ret = sd->queued;
                sd->queued = 0;
        } else
                ret = 0;

        dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
        return ret;
}

static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
        struct syncio_data *sd = td->io_ops->data;

        return sd->io_us[event];
}

/*
 * An io_u can join the current batch only if it targets the same file and
 * direction and starts exactly where the previous request left off.
 */
static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
        struct syncio_data *sd = td->io_ops->data;

        if (io_u->ddir == DDIR_SYNC)
                return 0;

        if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
            io_u->ddir == sd->last_ddir)
                return 1;

        return 0;
}

static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
                                int index)
{
        sd->io_us[index] = io_u;
        sd->iovecs[index].iov_base = io_u->xfer_buf;
        sd->iovecs[index].iov_len = io_u->xfer_buflen;
        sd->last_offset = io_u->offset + io_u->xfer_buflen;
        sd->last_file = io_u->file;
        sd->last_ddir = io_u->ddir;
        sd->queued_bytes += io_u->xfer_buflen;
        sd->queued++;
}

static int fio_vsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct syncio_data *sd = td->io_ops->data;

        fio_ro_check(td, io_u);

        if (!fio_vsyncio_append(td, io_u)) {
                dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
                /*
                 * If we can't append and have stuff queued, tell fio to
                 * commit those first and then retry this io
                 */
                if (sd->queued)
                        return FIO_Q_BUSY;

                sd->queued = 0;
                sd->queued_bytes = 0;
                fio_vsyncio_set_iov(sd, io_u, 0);
        } else {
                if (sd->queued == td->o.iodepth) {
                        dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
                        return FIO_Q_BUSY;
                }

                dprint(FD_IO, "vsyncio_queue: append\n");
                fio_vsyncio_set_iov(sd, io_u, sd->queued);
        }

        dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
        return FIO_Q_QUEUED;
}

/*
 * Check that we transferred all bytes, or saw an error, etc
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
        struct syncio_data *sd = td->io_ops->data;
        struct io_u *io_u;
        unsigned int i;
        int err;

        /*
         * transferred everything, perfect
         */
        if (bytes == sd->queued_bytes)
                return 0;

        err = errno;
        for (i = 0; i < sd->queued; i++) {
                io_u = sd->io_us[i];

                if (bytes == -1) {
                        io_u->error = err;
                } else {
                        unsigned int this_io;

                        this_io = bytes;
                        if (this_io > io_u->xfer_buflen)
                                this_io = io_u->xfer_buflen;

                        io_u->resid = io_u->xfer_buflen - this_io;
                        io_u->error = 0;
                        bytes -= this_io;
                }
        }

        if (bytes == -1) {
                td_verror(td, err, "xfer vsync");
                return -err;
        }

        return 0;
}

/*
 * Submit the whole batch with a single readv(2)/writev(2), after seeking
 * to the offset of the first queued io_u.
 */
static int fio_vsyncio_commit(struct thread_data *td)
{
        struct syncio_data *sd = td->io_ops->data;
        struct fio_file *f;
        ssize_t ret;

        if (!sd->queued)
                return 0;

        f = sd->last_file;

        if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
                int err = -errno;

                td_verror(td, errno, "lseek");
                return err;
        }

        if (sd->last_ddir == DDIR_READ)
                ret = readv(f->fd, sd->iovecs, sd->queued);
        else
                ret = writev(f->fd, sd->iovecs, sd->queued);

        dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
        return fio_vsyncio_end(td, ret);
}

static int fio_vsyncio_init(struct thread_data *td)
{
        struct syncio_data *sd;

        sd = malloc(sizeof(*sd));
        memset(sd, 0, sizeof(*sd));
        sd->last_offset = -1ULL;
        sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
        sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));

        td->io_ops->data = sd;
        return 0;
}

static void fio_vsyncio_cleanup(struct thread_data *td)
{
        struct syncio_data *sd = td->io_ops->data;

        free(sd->iovecs);
        free(sd->io_us);
        free(sd);
        td->io_ops->data = NULL;
}

static struct ioengine_ops ioengine_rw = {
        .name           = "sync",
        .version        = FIO_IOOPS_VERSION,
        .prep           = fio_syncio_prep,
        .queue          = fio_syncio_queue,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .flags          = FIO_SYNCIO,
};

static struct ioengine_ops ioengine_prw = {
        .name           = "psync",
        .version        = FIO_IOOPS_VERSION,
        .queue          = fio_psyncio_queue,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .flags          = FIO_SYNCIO,
};

static struct ioengine_ops ioengine_vrw = {
        .name           = "vsync",
        .version        = FIO_IOOPS_VERSION,
        .init           = fio_vsyncio_init,
        .cleanup        = fio_vsyncio_cleanup,
        .queue          = fio_vsyncio_queue,
        .commit         = fio_vsyncio_commit,
        .event          = fio_vsyncio_event,
        .getevents      = fio_vsyncio_getevents,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .flags          = FIO_SYNCIO,
};

static void fio_init fio_syncio_register(void)
{
        register_ioengine(&ioengine_rw);
        register_ioengine(&ioengine_prw);
        register_ioengine(&ioengine_vrw);
}

static void fio_exit fio_syncio_unregister(void)
{
        unregister_ioengine(&ioengine_rw);
        unregister_ioengine(&ioengine_prw);
        unregister_ioengine(&ioengine_vrw);
}