/*
 * mmap engine
 *
 * IO engine that reads/writes from files by doing memcpy to/from
 * a memory mapped region of the file.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>

#include "../fio.h"
#include "../verify.h"

/*
 * Limits us to 1GB of mapped files in total
 */
#define MMAP_TOTAL_SZ   (1 * 1024 * 1024 * 1024UL)

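/*
 * Per-file mapping window size (MMAP_TOTAL_SZ divided across the job's
 * files) and the largest power of two that fits within that size. Both
 * are set up in fio_mmapio_init().
 */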
static unsigned long mmap_map_size;
static unsigned long mmap_map_mask;

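/*
 * Map 'length' bytes of the file at offset 'off'. The mmap protection is
 * picked from the job's data direction (and verify setting), and the
 * kernel is hinted with MADV_SEQUENTIAL or MADV_RANDOM depending on the
 * access pattern. On error the mapping is torn down again and td->error
 * is returned.
 */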
static int fio_mmap_file(struct thread_data *td, struct fio_file *f,
                         size_t length, off_t off)
{
        int flags = 0;

        if (td_rw(td))
                flags = PROT_READ | PROT_WRITE;
        else if (td_write(td)) {
                flags = PROT_WRITE;

                if (td->o.verify != VERIFY_NONE)
                        flags |= PROT_READ;
        } else
                flags = PROT_READ;

        f->mmap_ptr = mmap(NULL, length, flags, MAP_SHARED, f->fd, off);
        if (f->mmap_ptr == MAP_FAILED) {
                f->mmap_ptr = NULL;
                td_verror(td, errno, "mmap");
                goto err;
        }

        if (!td_random(td)) {
                if (madvise(f->mmap_ptr, length, MADV_SEQUENTIAL) < 0) {
                        td_verror(td, errno, "madvise");
                        goto err;
                }
        } else {
                if (madvise(f->mmap_ptr, length, MADV_RANDOM) < 0) {
                        td_verror(td, errno, "madvise");
                        goto err;
                }
        }

err:
        if (td->error && f->mmap_ptr)
                munmap(f->mmap_ptr, length);

        return td->error;
}

/*
 * Just mmap an appropriate portion, we cannot mmap the full extent
 */
static int fio_mmapio_prep_limited(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;

        if (io_u->buflen > mmap_map_size) {
                log_err("fio: bs too big for mmap engine\n");
                return EIO;
        }

        if (f->mmap_ptr) {
                if (munmap(f->mmap_ptr, f->mmap_sz) < 0)
                        return errno;
                f->mmap_ptr = NULL;
        }

        f->mmap_sz = mmap_map_size;
        if (f->mmap_sz > f->io_size)
                f->mmap_sz = f->io_size;

        f->mmap_off = io_u->offset;

        return fio_mmap_file(td, f, f->mmap_sz, f->mmap_off);
}

/*
 * Attempt to mmap the entire file
 */
static int fio_mmapio_prep_full(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        int ret;

        if (fio_file_partial_mmap(f))
                return EINVAL;

        if (f->mmap_ptr) {
                if (munmap(f->mmap_ptr, f->mmap_sz) < 0)
                        return errno;
                f->mmap_ptr = NULL;
        }

        f->mmap_sz = f->io_size;
        f->mmap_off = 0;

        ret = fio_mmap_file(td, f, f->mmap_sz, f->mmap_off);
        if (ret)
                fio_file_set_partial_mmap(f);

        return ret;
}

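/*
 * Make sure the current mapping covers this io_u. If it does not, try to
 * map the whole file first and fall back to a limited window, then point
 * io_u->mmap_data at the right spot inside the mapping.
 */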
static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        int ret;

        if (io_u->offset >= f->mmap_off &&
            io_u->offset + io_u->buflen < f->mmap_off + f->mmap_sz)
                goto done;

        if (fio_mmapio_prep_full(td, io_u)) {
                td_clear_error(td);
                ret = fio_mmapio_prep_limited(td, io_u);
                if (ret)
                        return ret;
        }

done:
        io_u->mmap_data = f->mmap_ptr + io_u->offset - f->mmap_off -
                                f->file_offset;
        return 0;
}

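/*
 * "Queueing" is completely synchronous here: reads and writes are plain
 * memcpy()s against the mapping, and sync requests are handled with
 * msync() on the mapped region.
 */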
static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;

        fio_ro_check(td, io_u);

        if (io_u->ddir == DDIR_READ)
                memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
        else if (io_u->ddir == DDIR_WRITE)
                memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
        else if (ddir_sync(io_u->ddir)) {
                if (msync(f->mmap_ptr, f->mmap_sz, MS_SYNC)) {
                        io_u->error = errno;
                        td_verror(td, io_u->error, "msync");
                }
        }

        /*
         * not really direct, but should drop the pages from the cache
         */
        if (td->o.odirect && !ddir_sync(io_u->ddir)) {
                if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
                        io_u->error = errno;
                        td_verror(td, io_u->error, "msync");
                }
                if (madvise(io_u->mmap_data, io_u->xfer_buflen, MADV_DONTNEED) < 0) {
                        io_u->error = errno;
                        td_verror(td, io_u->error, "madvise");
                }
        }

        return FIO_Q_COMPLETED;
}

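/*
 * Split the total mapping budget evenly across the job's files and record
 * the largest power of two that fits in the per-file window size.
 */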
static int fio_mmapio_init(struct thread_data *td)
{
        unsigned long shift, mask;

        mmap_map_size = MMAP_TOTAL_SZ / td->o.nr_files;
        mask = mmap_map_size;
        shift = 0;
        do {
                mask >>= 1;
                if (!mask)
                        break;
                shift++;
        } while (1);

        mmap_map_mask = 1UL << shift;
        return 0;
}

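/*
 * The engine is fully synchronous (FIO_SYNCIO), completing every request
 * from within ->queue(), and it never extends files (FIO_NOEXTEND) since
 * it only copies data within the mapped extent of the file.
 */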
static struct ioengine_ops ioengine = {
        .name           = "mmap",
        .version        = FIO_IOOPS_VERSION,
        .init           = fio_mmapio_init,
        .prep           = fio_mmapio_prep,
        .queue          = fio_mmapio_queue,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .get_file_size  = generic_get_file_size,
        .flags          = FIO_SYNCIO | FIO_NOEXTEND,
};

static void fio_init fio_mmapio_register(void)
{
        register_ioengine(&ioengine);
}

static void fio_exit fio_mmapio_unregister(void)
{
        unregister_ioengine(&ioengine);
}