not sync the file. The exception is the sg io engine, which
synchronizes the disk cache anyway.
+fdatasync=int	Like fsync= but uses fdatasync() to only sync data and not
+ metadata blocks.
+
overwrite=bool If true, writes to a file will always overwrite existing
data. If the file doesn't already exist, it will be
created before the write phase begins. If the file exists
io_u->greq = guasi__pwrite(ld->hctx, ld, io_u, 0,
f->fd, io_u->xfer_buf, io_u->xfer_buflen,
io_u->offset);
- else if (io_u->ddir == DDIR_SYNC)
+ else if (ddir_sync(io_u->ddir))
io_u->greq = guasi__fsync(ld->hctx, ld, io_u, 0, f->fd);
else {
log_err("fio_guasi_commit() FAILED: unknow request %d\n",
io_prep_pread(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
else if (io_u->ddir == DDIR_WRITE)
io_prep_pwrite(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
- else if (io_u->ddir == DDIR_SYNC)
+ else if (ddir_sync(io_u->ddir))
io_prep_fsync(&io_u->iocb, f->fd);
else
return 1;
if (fsync(io_u->file->fd) < 0)
io_u->error = errno;
+ return FIO_Q_COMPLETED;
+ } else if (io_u->ddir == DDIR_DATASYNC) {
+ if (ld->iocbs_nr)
+ return FIO_Q_BUSY;
+ if (fdatasync(io_u->file->fd) < 0)
+ io_u->error = errno;
+
return FIO_Q_COMPLETED;
}
memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
else if (io_u->ddir == DDIR_WRITE)
memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
- else if (io_u->ddir == DDIR_SYNC) {
+ else if (ddir_sync(io_u->ddir)) {
if (msync(f->mmap_ptr, f->mmap_sz, MS_SYNC)) {
io_u->error = errno;
td_verror(td, io_u->error, "msync");
/*
* not really direct, but should drop the pages from the cache
*/
- if (td->o.odirect && io_u->ddir != DDIR_SYNC) {
+ if (td->o.odirect && !ddir_sync(io_u->ddir)) {
if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
io_u->error = errno;
td_verror(td, io_u->error, "msync");
fio_ro_check(td, io_u);
- ret = fio_sgio_doio(td, io_u, io_u->ddir == DDIR_SYNC);
+ ret = fio_sgio_doio(td, io_u, ddir_sync(io_u->ddir));
if (ret < 0)
io_u->error = errno;
return FIO_Q_COMPLETED;
}
+ if (io_u->ddir == DDIR_DATASYNC) {
+ if (sd->nr)
+ return FIO_Q_BUSY;
+ if (fdatasync(f->fd) < 0)
+ io_u->error = errno;
+
+ return FIO_Q_COMPLETED;
+ }
+
if (sd->nr == sd->max_depth)
return FIO_Q_BUSY;
{
struct fio_file *f = io_u->file;
- if (io_u->ddir == DDIR_SYNC)
+ if (ddir_sync(io_u->ddir))
return 0;
if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
{
struct syncio_data *sd = td->io_ops->data;
- if (io_u->ddir == DDIR_SYNC)
+ if (ddir_sync(io_u->ddir))
return 0;
if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
int ret = fsync(io_u->file->fd);
return fio_io_end(td, io_u, ret);
- }
+ } else if (io_u->ddir == DDIR_DATASYNC) {
+ int ret = fdatasync(io_u->file->fd);
+ return fio_io_end(td, io_u, ret);
+ }
+
sd->queued = 0;
sd->queued_bytes = 0;
fio_vsyncio_set_iov(sd, io_u, 0);
FILL_IN(*regs, __NR_fsync, (long) f->fd);
}
+static void fio_syslet_prep_datasync(struct fio_file *f,
+ struct indirect_registers *regs)
+{
+ FILL_IN(*regs, __NR_fdatasync, (long) f->fd);
+}
+
static void fio_syslet_prep_rw(struct io_u *io_u, struct fio_file *f,
struct indirect_registers *regs)
{
if (io_u->ddir == DDIR_SYNC)
fio_syslet_prep_sync(f, regs);
+ else if (io_u->ddir == DDIR_DATASYNC)
+ fio_syslet_prep_datasync(f, regs);
else
fio_syslet_prep_rw(io_u, f, regs);
}
How many I/Os to perform before issuing an \fBfsync\fR\|(2) of dirty data. If
0, don't sync. Default: 0.
.TP
+.BI fdatasync \fR=\fPint
+Like \fBfsync\fR, but uses \fBfdatasync\fR\|(2) instead to only sync the
+data parts of the file. Default: 0.
+.TP
.BI overwrite \fR=\fPbool
If writing, setup the file first and do overwrites. Default: false.
.TP
unsigned int thinktime_spin;
unsigned int thinktime_blocks;
unsigned int fsync_blocks;
+ unsigned int fdatasync_blocks;
unsigned int start_delay;
unsigned long long timeout;
unsigned long long ramp_time;
DDIR_READ = 0,
DDIR_WRITE,
DDIR_SYNC,
+ DDIR_DATASYNC,
DDIR_INVAL = -1,
};
#define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
#define file_randommap(td, f) (!(td)->o.norandommap && (f)->file_map)
+static inline int ddir_sync(enum fio_ddir ddir)
+{
+ return ddir == DDIR_SYNC || ddir == DDIR_DATASYNC;
+}
+
#endif
{
enum fio_ddir ddir;
+ /*
+ * see if it's time to fsync
+ */
+ if (td->o.fsync_blocks &&
+ !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
+ td->io_issues[DDIR_WRITE] && should_fsync(td))
+ return DDIR_SYNC;
+
+ /*
+ * see if it's time to fdatasync
+ */
+ if (td->o.fdatasync_blocks &&
+ !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
+ td->io_issues[DDIR_WRITE] && should_fsync(td))
+ return DDIR_DATASYNC;
+
if (td_rw(td)) {
/*
* Check if it's time to seed a new data direction.
dprint(FD_IO, "requeue %p\n", __io_u);
__io_u->flags |= IO_U_F_FREE;
- if ((__io_u->flags & IO_U_F_FLIGHT) && (__io_u->ddir != DDIR_SYNC))
+ if ((__io_u->flags & IO_U_F_FLIGHT) && !ddir_sync(__io_u->ddir))
td->io_issues[__io_u->ddir]--;
__io_u->flags &= ~IO_U_F_FLIGHT;
if (td->io_ops->flags & FIO_NOIO)
goto out;
+ io_u->ddir = get_rw_ddir(td);
+
/*
- * see if it's time to sync
+ * fsync() or fdatasync(), we are done
*/
- if (td->o.fsync_blocks &&
- !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
- td->io_issues[DDIR_WRITE] && should_fsync(td)) {
- io_u->ddir = DDIR_SYNC;
+ if (ddir_sync(io_u->ddir))
goto out;
- }
-
- io_u->ddir = get_rw_ddir(td);
/*
* See if it's time to switch to a new zone
f = io_u->file;
assert(fio_file_open(f));
- if (io_u->ddir != DDIR_SYNC) {
+ if (!ddir_sync(io_u->ddir)) {
if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
goto err_put;
assert(io_u->flags & IO_U_F_FLIGHT);
io_u->flags &= ~IO_U_F_FLIGHT;
- if (io_u->ddir == DDIR_SYNC) {
+ if (ddir_sync(io_u->ddir)) {
td->last_was_sync = 1;
return;
}
sizeof(struct timeval));
}
- if (io_u->ddir != DDIR_SYNC)
+ if (!ddir_sync(io_u->ddir))
td->io_issues[io_u->ddir]++;
ret = td->io_ops->queue(td, io_u);
}
if (ret == FIO_Q_COMPLETED) {
- if (io_u->ddir != DDIR_SYNC) {
+ if (!ddir_sync(io_u->ddir)) {
io_u_mark_depth(td, 1);
td->ts.total_io_u[io_u->ddir]++;
}
} else if (ret == FIO_Q_QUEUED) {
int r;
- if (io_u->ddir != DDIR_SYNC) {
+ if (!ddir_sync(io_u->ddir)) {
td->io_u_queued++;
td->ts.total_io_u[io_u->ddir]++;
}
void log_io_u(struct thread_data *td, struct io_u *io_u)
{
- const char *act[] = { "read", "write", "sync" };
+ const char *act[] = { "read", "write", "sync", "datasync" };
assert(io_u->ddir < 3);
rw = DDIR_WRITE;
else if (!strcmp(act, "sync"))
rw = DDIR_SYNC;
+ else if (!strcmp(act, "datasync"))
+ rw = DDIR_DATASYNC;
else {
log_err("fio: bad iolog file action: %s\n",
act);
if (read_only)
continue;
writes++;
- } else if (rw != DDIR_SYNC && rw != DDIR_INVAL) {
+ } else if (!ddir_sync(rw)) {
log_err("bad ddir: %d\n", rw);
continue;
}
.help = "Issue fsync for writes every given number of blocks",
.def = "0",
},
+ {
+ .name = "fdatasync",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(fdatasync_blocks),
+ .help = "Issue fdatasync for writes every given number of blocks",
+ .def = "0",
+ },
{
.name = "direct",
.type = FIO_OPT_BOOL,