/*
 * sync/psync/vsync engines
 *
 * IO engines that transfer data using regular read(2)/write(2) with
 * lseek(2), positioned pread(2)/pwrite(2), or vectored readv(2)/writev(2).
 *
 */
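
/*
 * These engines are selected with the "ioengine" job option. A minimal
 * job file sketch (option names are standard fio options; the filename
 * and sizes below are purely illustrative):
 *
 *	[global]
 *	ioengine=psync
 *	rw=randread
 *	bs=4k
 *	direct=1
 *
 *	[example]
 *	filename=/tmp/fio-test
 *	size=128m
 */
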
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/uio.h>
#include <errno.h>
#include <assert.h>

#include "../fio.h"

/*
 * The sync engines use engine_data to store the last offset we ended up at
 */
#define LAST_POS(f)	((f)->engine_data)

/*
 * Per-thread state for the vsync engine: the io_u's batched up for the
 * next readv(2)/writev(2).
 */
struct syncio_data {
	struct iovec *iovecs;
	struct io_u **io_us;
	unsigned int queued;
	unsigned int events;
	unsigned long queued_bytes;

	unsigned long long last_offset;
	struct fio_file *last_file;
	enum fio_ddir last_ddir;
};

static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (!ddir_rw(io_u->ddir))
		return 0;

	/*
	 * Skip the lseek(2) if the file is already positioned at the
	 * requested offset.
	 */
	if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
		return 0;

	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
		td_verror(td, errno, "lseek");
		return 1;
	}

	return 0;
}

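/*
 * Common completion bookkeeping for the sync engines. As an illustrative
 * example: if an 8k write returns 4k, io_u->resid is set to 4k and
 * io_u->error stays 0, so fio sees a completed-but-short transfer rather
 * than a failure; a negative return records errno instead.
 */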
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
	if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
		LAST_POS(io_u->file) = io_u->offset + ret;

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error)
		td_verror(td, io_u->error, "xfer");

	return FIO_Q_COMPLETED;
}

static int fio_psyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}

static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}

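/*
 * The vsync engine batches contiguous io_u's to the same file and in the
 * same direction, then submits the whole batch with a single readv(2) or
 * writev(2) on commit; getevents() hands back the batch that was just
 * submitted.
 */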
static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max,
				 struct timespec fio_unused *t)
{
	struct syncio_data *sd = td->io_ops->data;
	int ret;

	if (min) {
		ret = sd->events;
		sd->events = 0;
	} else
		ret = 0;

	dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
	return ret;
}

static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
	struct syncio_data *sd = td->io_ops->data;

	return sd->io_us[event];
}

static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;

	if (ddir_sync(io_u->ddir))
		return 0;

	/*
	 * Only append if this io_u continues the current batch: same file,
	 * same direction, and contiguous with the previous offset.
	 */
	if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
	    io_u->ddir == sd->last_ddir)
		return 1;

	return 0;
}

static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
				int idx)
{
	sd->io_us[idx] = io_u;
	sd->iovecs[idx].iov_base = io_u->xfer_buf;
	sd->iovecs[idx].iov_len = io_u->xfer_buflen;
	sd->last_offset = io_u->offset + io_u->xfer_buflen;
	sd->last_file = io_u->file;
	sd->last_ddir = io_u->ddir;
	sd->queued_bytes += io_u->xfer_buflen;
	sd->queued++;
}

static int fio_vsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (!fio_vsyncio_append(td, io_u)) {
		dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
		/*
		 * If we can't append and have stuff queued, tell fio to
		 * commit those first and then retry this io
		 */
		if (sd->queued)
			return FIO_Q_BUSY;
		if (ddir_sync(io_u->ddir)) {
			int ret = do_io_u_sync(td, io_u);

			return fio_io_end(td, io_u, ret);
		}

		sd->queued = 0;
		sd->queued_bytes = 0;
		fio_vsyncio_set_iov(sd, io_u, 0);
	} else {
		if (sd->queued == td->o.iodepth) {
			dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
			return FIO_Q_BUSY;
		}

		dprint(FD_IO, "vsyncio_queue: append\n");
		fio_vsyncio_set_iov(sd, io_u, sd->queued);
	}

	dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
	return FIO_Q_QUEUED;
}

/*
 * Check that we transferred all bytes, or saw an error, etc
 */
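/*
 * As an illustrative example: if three 4k io_u's were queued and the
 * readv(2) returned 10k, the first two complete in full and the third is
 * left with a 2k residual; a return of -1 instead marks every queued io_u
 * with the errno from the failed call.
 */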
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
	struct syncio_data *sd = td->io_ops->data;
	struct io_u *io_u;
	unsigned int i;
	int err;

	/*
	 * transferred everything, perfect
	 */
	if (bytes == sd->queued_bytes)
		return 0;

	err = errno;
	for (i = 0; i < sd->queued; i++) {
		io_u = sd->io_us[i];

		if (bytes == -1) {
			io_u->error = err;
		} else {
			unsigned int this_io;

			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			io_u->error = 0;
			bytes -= this_io;
		}
	}

	if (bytes == -1) {
		td_verror(td, err, "xfer vsync");
		return -err;
	}

	return 0;
}

244{
245 struct syncio_data *sd = td->io_ops->data;
246 struct fio_file *f;
247 ssize_t ret;
248
249 if (!sd->queued)
250 return 0;
251
838bc709 252 io_u_mark_submit(td, sd->queued);
1d2af02a
JA
253 f = sd->last_file;
254
255 if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
256 int err = -errno;
257
258 td_verror(td, errno, "lseek");
259 return err;
260 }
261
262 if (sd->last_ddir == DDIR_READ)
263 ret = readv(f->fd, sd->iovecs, sd->queued);
264 else
265 ret = writev(f->fd, sd->iovecs, sd->queued);
266
267 dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
e51cf72c
JA
268 sd->events = sd->queued;
269 sd->queued = 0;
1d2af02a
JA
270 return fio_vsyncio_end(td, ret);
271}

static int fio_vsyncio_init(struct thread_data *td)
{
	struct syncio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->last_offset = -1ULL;
	sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));

	td->io_ops->data = sd;
	return 0;
}

static void fio_vsyncio_cleanup(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops->data;

	free(sd->iovecs);
	free(sd->io_us);
	free(sd);
}

static struct ioengine_ops ioengine_rw = {
	.name		= "sync",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_syncio_prep,
	.queue		= fio_syncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static struct ioengine_ops ioengine_prw = {
	.name		= "psync",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_psyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static struct ioengine_ops ioengine_vrw = {
	.name		= "vsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_vsyncio_queue,
	.commit		= fio_vsyncio_commit,
	.event		= fio_vsyncio_event,
	.getevents	= fio_vsyncio_getevents,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static void fio_init fio_syncio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_vrw);
}

static void fio_exit fio_syncio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_vrw);
}