 	io_u->ddir = DDIR_SYNC;
 	io_u->file = f;
+	io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
 
 	if (td_io_prep(td, io_u)) {
 		put_io_u(td, io_u);
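The new IO_U_F_NO_FILE_PUT flag is what keeps the put_io_u() path below it balanced: as I read put_io_u(), it drops a reference on io_u->file when recycling an io_u unless this flag is set, and the sync io_u built here never took a file reference of its own. A minimal standalone model of that pattern (invented types, not fio's structures):

#include <assert.h>
#include <stdio.h>

/* Toy model: an I/O unit that may or may not own a reference on its
 * file. NO_FILE_PUT says "recycling this io_u must not drop the
 * file's refcount, because we never took one". */
enum { IO_F_NO_FILE_PUT = 1 << 0 };

struct file { int refs; };
struct io   { struct file *file; unsigned flags; };

static void put_file(struct file *f)
{
	assert(f->refs > 0);	/* trips here if the put is unbalanced */
	f->refs--;
}

static void put_io(struct io *io)
{
	if (io->file && !(io->flags & IO_F_NO_FILE_PUT))
		put_file(io->file);
	io->file = NULL;
}

int main(void)
{
	struct file f = { .refs = 0 };	/* sync path: no ref was taken */
	struct io io = { .file = &f, .flags = IO_F_NO_FILE_PUT };

	put_io(&io);	/* safe: the flag prevents the unbalanced put */
	printf("refs=%d\n", f.refs);
	return 0;
}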
 static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
 {
-	int ret;
+	int ret, ret2;
 
 	if (fio_file_open(f))
 		return fio_io_sync(td, f);
 
 	if (td_io_open_file(td, f))
 		return 1;
 
 	ret = fio_io_sync(td, f);
-	td_io_close_file(td, f);
-	return ret;
+	ret2 = 0;
+	if (fio_file_open(f))
+		ret2 = td_io_close_file(td, f);
+	return (ret || ret2);
 }
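The rewritten tail of fio_file_fsync() fixes two things at once: the close now only happens if the file is still open, and its return value is no longer thrown away, so a failed close fails the whole operation even when the sync itself succeeded. A plain-POSIX sketch of the same error-combining pattern (flush_and_close() is an invented helper, not a fio API):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int flush_and_close(int fd)
{
	int ret = 0, ret2 = 0;

	if (fsync(fd) < 0)
		ret = errno;
	if (close(fd) < 0)
		ret2 = errno;

	/* Fail if either step failed; the first error wins. */
	return ret ? ret : ret2;
}

int main(void)
{
	int fd = open("fsync_demo.tmp", O_CREAT | O_WRONLY, 0600);

	if (fd < 0)
		return 1;
	unlink("fsync_demo.tmp");	/* data stays reachable via fd */

	int err = flush_and_close(fd);
	if (err)
		fprintf(stderr, "flush_and_close: %s\n", strerror(err));
	return err != 0;
}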
 				td->error = 0;
 		}
 
-		if (should_fsync(td) && td->o.end_fsync) {
+		if (should_fsync(td) && (td->o.end_fsync || td->o.fsync_on_close)) {
 			td_set_runstate(td, TD_FSYNCING);
 
 			for_each_file(td, f, i) {
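Gating the final sync pass on fsync_on_close as well as end_fsync means the end-of-job flush loop, run under the TD_FSYNCING state, now also covers jobs that only set fsync_on_close instead of relying solely on syncs at close time. A toy model of gating one final pass on several options (all names invented):

#include <stdbool.h>
#include <stdio.h>

struct options {
	bool end_fsync;
	bool fsync_on_close;
};

struct file { const char *name; };

static int file_fsync(struct file *f)
{
	printf("fsync %s\n", f->name);
	return 0;	/* pretend success */
}

/* Run one final flush pass over all files if any option that
 * promises a sync at job end is set. */
static void end_of_job(const struct options *o,
		       struct file *files, int nr)
{
	if (!(o->end_fsync || o->fsync_on_close))
		return;

	for (int i = 0; i < nr; i++)
		file_fsync(&files[i]);
}

int main(void)
{
	struct options o = { .end_fsync = false, .fsync_on_close = true };
	struct file files[] = { { "a.dat" }, { "b.dat" } };

	end_of_job(&o, files, 2);	/* flushes both files */
	return 0;
}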
 	}
 
 	if (ret == FIO_Q_COMPLETED) {
-		if (ddir_rw(io_u->ddir) || ddir_sync(io_u->ddir)) {
+		if (ddir_rw(io_u->ddir) ||
+		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING)) {
 			io_u_mark_depth(td, 1);
 			td->ts.total_io_u[io_u->ddir]++;
 		}
 	} else if (ret == FIO_Q_QUEUED) {
 		td->io_u_queued++;
 
-		if (ddir_rw(io_u->ddir) || ddir_sync(io_u->ddir))
+		if (ddir_rw(io_u->ddir) ||
+		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING))
 			td->ts.total_io_u[io_u->ddir]++;
 
 		if (td->io_u_queued >= td->o.iodepth_batch)
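The runstate check in both branches keeps those end-of-job syncs out of the statistics: while the thread is in TD_FSYNCING, DDIR_SYNC io_us are still issued and reaped, but they no longer bump ts.total_io_u[], so the reported sync count covers only syncs generated by the workload itself. A toy model of that state-gated accounting (types and names invented, not fio's):

#include <stdio.h>

enum state { RUNNING, FSYNCING };
enum ddir  { DDIR_READ, DDIR_WRITE, DDIR_SYNC, DDIR_NR };

struct stats { unsigned long total_io_u[DDIR_NR]; };

/* Count completed operations, but skip "administrative" syncs
 * issued during the final flush state. */
static void account(struct stats *s, enum ddir dir, enum state st)
{
	if (dir != DDIR_SYNC || st != FSYNCING)
		s->total_io_u[dir]++;
}

int main(void)
{
	struct stats s = { { 0 } };

	account(&s, DDIR_WRITE, RUNNING);	/* counted */
	account(&s, DDIR_SYNC,  RUNNING);	/* counted: workload sync */
	account(&s, DDIR_SYNC,  FSYNCING);	/* skipped: end-of-job sync */

	printf("writes=%lu syncs=%lu\n",
	       s.total_io_u[DDIR_WRITE], s.total_io_u[DDIR_SYNC]);
	return 0;
}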