engines/sync.c

/*
 * sync/psync engine
 *
 * IO engines that do regular read(2)/write(2) with lseek(2), or regular
 * pread(2)/pwrite(2), to transfer data.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/uio.h>
#include <errno.h>

#include "../fio.h"
#include "../optgroup.h"
#include "../lib/rand.h"

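/*
 * Usage note (illustrative example, not part of the original file): these
 * engines are selected with fio's "ioengine" option, e.g.
 *
 *	fio --name=test --ioengine=psync --rw=randread --bs=4k --size=1g
 *
 * "sync" issues read(2)/write(2) after lseek(2), "psync" uses
 * pread(2)/pwrite(2), "vsync" batches contiguous I/O into readv(2)/writev(2),
 * and "pvsync"/"pvsync2" use preadv(2)/pwritev(2) and preadv2(2)/pwritev2(2).
 */
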
/*
 * The sync engines use engine_pos in the fio_file to remember the last
 * offset touched, so redundant lseek(2) calls can be skipped.
 */
#define LAST_POS(f)	((f)->engine_pos)

struct syncio_data {
	struct iovec *iovecs;
	struct io_u **io_us;
	unsigned int queued;
	unsigned int events;
	unsigned long queued_bytes;

	unsigned long long last_offset;
	struct fio_file *last_file;
	enum fio_ddir last_ddir;

	struct frand_state rand_state;
};

#ifdef FIO_HAVE_PWRITEV2
struct psyncv2_options {
	void *pad;
	unsigned int hipri;
	unsigned int hipri_percentage;
};

static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "RWF_HIPRI",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct psyncv2_options, hipri),
		.help	= "Set RWF_HIPRI for pwritev2/preadv2",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_INVALID,
	},
	{
		.name	= "hipri_percentage",
		.lname	= "RWF_HIPRI_PERCENTAGE",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct psyncv2_options, hipri_percentage),
		.minval	= 0,
		.maxval	= 100,
		.def	= "100",
		.help	= "Probabilistically set RWF_HIPRI for pwritev2/preadv2",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_INVALID,
	},
	{
		.name	= NULL,
	},
};
#endif

static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (!ddir_rw(io_u->ddir))
		return 0;

	if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
		return 0;

	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
		td_verror(td, errno, "lseek");
		return 1;
	}

	return 0;
}

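/*
 * Common completion handling: advance the cached file position on success,
 * account a short transfer as residual, and capture errno on failure.
 */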
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
	if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
		LAST_POS(io_u->file) = io_u->offset + ret;

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error) {
		io_u_log_error(td, io_u);
		td_verror(td, io_u->error, "xfer");
	}

	return FIO_Q_COMPLETED;
}

#ifdef CONFIG_PWRITEV
static enum fio_q_status fio_pvsyncio_queue(struct thread_data *td,
					    struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;
	struct iovec *iov = &sd->iovecs[0];
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (io_u->ddir == DDIR_READ)
		ret = preadv(f->fd, iov, 1, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwritev(f->fd, iov, 1, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
#endif

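/*
 * pvsync2 queue path: when the "hipri" option is set, RWF_HIPRI is applied
 * to roughly hipri_percentage percent of I/Os, chosen via the per-thread
 * random state.
 */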
#ifdef FIO_HAVE_PWRITEV2
static enum fio_q_status fio_pvsyncio2_queue(struct thread_data *td,
					     struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;
	struct psyncv2_options *o = td->eo;
	struct iovec *iov = &sd->iovecs[0];
	struct fio_file *f = io_u->file;
	int ret, flags = 0;

	fio_ro_check(td, io_u);

	if (o->hipri &&
	    (rand_between(&sd->rand_state, 1, 100) <= o->hipri_percentage))
		flags |= RWF_HIPRI;

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (io_u->ddir == DDIR_READ)
		ret = preadv2(f->fd, iov, 1, io_u->offset, flags);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwritev2(f->fd, iov, 1, io_u->offset, flags);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
#endif

static enum fio_q_status fio_psyncio_queue(struct thread_data *td,
					   struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}

static enum fio_q_status fio_syncio_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}

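/*
 * vsync is still a sync engine: queued io_us complete as soon as commit
 * issues the readv/writev, so getevents simply hands back whatever the
 * last commit submitted.
 */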
static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max,
				 const struct timespec fio_unused *t)
{
	struct syncio_data *sd = td->io_ops_data;
	int ret;

	if (min) {
		ret = sd->events;
		sd->events = 0;
	} else
		ret = 0;

	dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
	return ret;
}

static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
	struct syncio_data *sd = td->io_ops_data;

	return sd->io_us[event];
}

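/*
 * An io_u can be appended to the current batch only if it continues the
 * previous one: same file, same direction, and an offset that follows the
 * last queued byte.
 */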
static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;

	if (ddir_sync(io_u->ddir))
		return 0;

	if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
	    io_u->ddir == sd->last_ddir)
		return 1;

	return 0;
}

static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
				int idx)
{
	sd->io_us[idx] = io_u;
	sd->iovecs[idx].iov_base = io_u->xfer_buf;
	sd->iovecs[idx].iov_len = io_u->xfer_buflen;
	sd->last_offset = io_u->offset + io_u->xfer_buflen;
	sd->last_file = io_u->file;
	sd->last_ddir = io_u->ddir;
	sd->queued_bytes += io_u->xfer_buflen;
	sd->queued++;
}

static enum fio_q_status fio_vsyncio_queue(struct thread_data *td,
					   struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;

	fio_ro_check(td, io_u);

	if (!fio_vsyncio_append(td, io_u)) {
		dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
		/*
		 * If we can't append and have stuff queued, tell fio to
		 * commit those first and then retry this io
		 */
		if (sd->queued)
			return FIO_Q_BUSY;
		if (ddir_sync(io_u->ddir)) {
			int ret = do_io_u_sync(td, io_u);

			return fio_io_end(td, io_u, ret);
		}

		sd->queued = 0;
		sd->queued_bytes = 0;
		fio_vsyncio_set_iov(sd, io_u, 0);
	} else {
		if (sd->queued == td->o.iodepth) {
			dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
			return FIO_Q_BUSY;
		}

		dprint(FD_IO, "vsyncio_queue: append\n");
		fio_vsyncio_set_iov(sd, io_u, sd->queued);
	}

	dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
	return FIO_Q_QUEUED;
}

/*
 * Check that we transferred all bytes, or saw an error, etc
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
	struct syncio_data *sd = td->io_ops_data;
	struct io_u *io_u;
	unsigned int i;
	int err;

	/*
	 * transferred everything, perfect
	 */
	if (bytes == sd->queued_bytes)
		return 0;

	err = errno;
	for (i = 0; i < sd->queued; i++) {
		io_u = sd->io_us[i];

		if (bytes == -1) {
			io_u->error = err;
		} else {
			unsigned int this_io;

			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			io_u->error = 0;
			bytes -= this_io;
		}
	}

	if (bytes == -1) {
		td_verror(td, err, "xfer vsync");
		return -err;
	}

	return 0;
}

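/*
 * Submit the batched iovecs with a single readv/writev after seeking to
 * the offset of the first queued io_u.
 */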
static int fio_vsyncio_commit(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops_data;
	struct fio_file *f;
	ssize_t ret;

	if (!sd->queued)
		return 0;

	io_u_mark_submit(td, sd->queued);
	f = sd->last_file;

	if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
		int err = -errno;

		td_verror(td, errno, "lseek");
		return err;
	}

	if (sd->last_ddir == DDIR_READ)
		ret = readv(f->fd, sd->iovecs, sd->queued);
	else
		ret = writev(f->fd, sd->iovecs, sd->queued);

	dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
	sd->events = sd->queued;
	sd->queued = 0;
	return fio_vsyncio_end(td, ret);
}

static int fio_vsyncio_init(struct thread_data *td)
{
	struct syncio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->last_offset = -1ULL;
	sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
	init_rand(&sd->rand_state, 0);

	td->io_ops_data = sd;
	return 0;
}

static void fio_vsyncio_cleanup(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops_data;

	if (sd) {
		free(sd->iovecs);
		free(sd->io_us);
		free(sd);
	}
}

static struct ioengine_ops ioengine_rw = {
	.name		= "sync",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_syncio_prep,
	.queue		= fio_syncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static struct ioengine_ops ioengine_prw = {
	.name		= "psync",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_psyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static struct ioengine_ops ioengine_vrw = {
	.name		= "vsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_vsyncio_queue,
	.commit		= fio_vsyncio_commit,
	.event		= fio_vsyncio_event,
	.getevents	= fio_vsyncio_getevents,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

#ifdef CONFIG_PWRITEV
static struct ioengine_ops ioengine_pvrw = {
	.name		= "pvsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_pvsyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
#endif

#ifdef FIO_HAVE_PWRITEV2
static struct ioengine_ops ioengine_pvrw2 = {
	.name		= "pvsync2",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_pvsyncio2_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
	.options	= options,
	.option_struct_size	= sizeof(struct psyncv2_options),
};
#endif

static void fio_init fio_syncio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_vrw);
#ifdef CONFIG_PWRITEV
	register_ioengine(&ioengine_pvrw);
#endif
#ifdef FIO_HAVE_PWRITEV2
	register_ioengine(&ioengine_pvrw2);
#endif
}

static void fio_exit fio_syncio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_vrw);
#ifdef CONFIG_PWRITEV
	unregister_ioengine(&ioengine_pvrw);
#endif
#ifdef FIO_HAVE_PWRITEV2
	unregister_ioengine(&ioengine_pvrw2);
#endif
}