diff options
author | Jens Axboe <axboe@kernel.dk> | 2019-10-15 09:27:06 -0600 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2019-10-15 09:27:06 -0600 |
commit | 2e97fa1b0d76edc6517fa4a8a4f6e0792b458e8c (patch) | |
tree | ecc986a7434cbed0bc7a03ba8b9ff200cdacc983 | |
parent | 6d5af4909c58f6b7bef6c31bfa21becd6d000945 (diff) | |
parent | 00615bfb4a5903b87220bd1d8c18f9b6515bdae0 (diff) | |
download | fio-2e97fa1b0d76edc6517fa4a8a4f6e0792b458e8c.tar.gz fio-2e97fa1b0d76edc6517fa4a8a4f6e0792b458e8c.tar.bz2 |
Merge branch 'fix-fsync-on-close' of https://github.com/sitsofe/fio
* 'fix-fsync-on-close' of https://github.com/sitsofe/fio:
backend: fix final fsync behaviour
 backend.c   | 11 +++++++----
 ioengines.c |  6 ++++--
 2 files changed, 11 insertions(+), 6 deletions(-)
@@ -281,6 +281,7 @@ static bool fio_io_sync(struct thread_data *td, struct fio_file *f)

 	io_u->ddir = DDIR_SYNC;
 	io_u->file = f;
+	io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);

 	if (td_io_prep(td, io_u)) {
 		put_io_u(td, io_u);
@@ -314,7 +315,7 @@ requeue:

 static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
 {
-	int ret;
+	int ret, ret2;

 	if (fio_file_open(f))
 		return fio_io_sync(td, f);
@@ -323,8 +324,10 @@ static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
 		return 1;

 	ret = fio_io_sync(td, f);
-	td_io_close_file(td, f);
-	return ret;
+	ret2 = 0;
+	if (fio_file_open(f))
+		ret2 = td_io_close_file(td, f);
+	return (ret || ret2);
 }

 static inline void __update_ts_cache(struct thread_data *td)
@@ -1124,7 +1127,7 @@ reap:
 		td->error = 0;
 	}

-	if (should_fsync(td) && td->o.end_fsync) {
+	if (should_fsync(td) && (td->o.end_fsync || td->o.fsync_on_close)) {
 		td_set_runstate(td, TD_FSYNCING);

 		for_each_file(td, f, i) {
diff --git a/ioengines.c b/ioengines.c
index 40fa75c3..9e3fcc9f 100644
--- a/ioengines.c
+++ b/ioengines.c
@@ -376,14 +376,16 @@ enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
 	}

 	if (ret == FIO_Q_COMPLETED) {
-		if (ddir_rw(io_u->ddir) || ddir_sync(io_u->ddir)) {
+		if (ddir_rw(io_u->ddir) ||
+		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING)) {
 			io_u_mark_depth(td, 1);
 			td->ts.total_io_u[io_u->ddir]++;
 		}
 	} else if (ret == FIO_Q_QUEUED) {
 		td->io_u_queued++;

-		if (ddir_rw(io_u->ddir) || ddir_sync(io_u->ddir))
+		if (ddir_rw(io_u->ddir) ||
+		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING))
 			td->ts.total_io_u[io_u->ddir]++;

 		if (td->io_u_queued >= td->o.iodepth_batch)