Merge branch 'fix-fsync-on-close' of https://github.com/sitsofe/fio
author Jens Axboe <axboe@kernel.dk>
Tue, 15 Oct 2019 15:27:06 +0000 (09:27 -0600)
committer Jens Axboe <axboe@kernel.dk>
Tue, 15 Oct 2019 15:27:06 +0000 (09:27 -0600)
* 'fix-fsync-on-close' of https://github.com/sitsofe/fio:
  backend: fix final fsync behaviour
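
For context: the merged fix makes the end-of-run sync pass in backend.c
honour fsync_on_close as well as end_fsync, avoids closing an
already-closed file in fio_file_fsync(), and keeps those final syncs out
of the per-direction I/O accounting in ioengines.c. A minimal job file
exercising the fixed path might look like this (a sketch; the filename
and size values are hypothetical):

    ; small write job whose final sync comes from fsync_on_close
    [global]
    rw=write
    size=4m
    fsync_on_close=1

    [writer]
    filename=/tmp/fio-fsync-on-close-test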

backend.c
ioengines.c

index 2f46329376451f18a49c94696e84687dda4390d8..fe868271a6cd90e755743965bdaf0936826272c1 100644
--- a/backend.c
+++ b/backend.c
@@ -281,6 +281,7 @@ static bool fio_io_sync(struct thread_data *td, struct fio_file *f)
 
        io_u->ddir = DDIR_SYNC;
        io_u->file = f;
+       io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
 
        if (td_io_prep(td, io_u)) {
                put_io_u(td, io_u);
@@ -314,7 +315,7 @@ requeue:
 
 static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
 {
-       int ret;
+       int ret, ret2;
 
        if (fio_file_open(f))
                return fio_io_sync(td, f);
@@ -323,8 +324,10 @@ static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
                return 1;
 
        ret = fio_io_sync(td, f);
-       td_io_close_file(td, f);
-       return ret;
+       ret2 = 0;
+       if (fio_file_open(f))
+               ret2 = td_io_close_file(td, f);
+       return (ret || ret2);
 }
 
 static inline void __update_ts_cache(struct thread_data *td)
@@ -1124,7 +1127,7 @@ reap:
                                td->error = 0;
                }
 
-               if (should_fsync(td) && td->o.end_fsync) {
+               if (should_fsync(td) && (td->o.end_fsync || td->o.fsync_on_close)) {
                        td_set_runstate(td, TD_FSYNCING);
 
                        for_each_file(td, f, i) {
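
For reference, the post-patch fio_file_fsync() assembled from the two
middle hunks reads as below; this is a sketch, since the diff only
implies the td_io_open_file() call via its 'return 1;' context line. The
first hunk's io_u_set(td, io_u, IO_U_F_NO_FILE_PUT) likewise marks the
sync io_u so that completing it does not also drop the file reference,
which this function now manages itself:

    static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
    {
            int ret, ret2;

            /* file already open: just issue the sync */
            if (fio_file_open(f))
                    return fio_io_sync(td, f);

            /* assumed from the 'return 1;' context line in the hunk */
            if (td_io_open_file(td, f))
                    return 1;

            ret = fio_io_sync(td, f);
            ret2 = 0;
            /* only close what is still open; report either failure */
            if (fio_file_open(f))
                    ret2 = td_io_close_file(td, f);
            return (ret || ret2);
    }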
index 40fa75c382b4f99ff4a265f58a24e403b804a929..9e3fcc9f681a46fa336ef247a6aa6497dade09af 100644
--- a/ioengines.c
+++ b/ioengines.c
@@ -376,14 +376,16 @@ enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
        }
 
        if (ret == FIO_Q_COMPLETED) {
-               if (ddir_rw(io_u->ddir) || ddir_sync(io_u->ddir)) {
+               if (ddir_rw(io_u->ddir) ||
+                   (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING)) {
                        io_u_mark_depth(td, 1);
                        td->ts.total_io_u[io_u->ddir]++;
                }
        } else if (ret == FIO_Q_QUEUED) {
                td->io_u_queued++;
 
-               if (ddir_rw(io_u->ddir) || ddir_sync(io_u->ddir))
+               if (ddir_rw(io_u->ddir) ||
+                   (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING))
                        td->ts.total_io_u[io_u->ddir]++;
 
                if (td->io_u_queued >= td->o.iodepth_batch)
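
Taken together, the two hunks above leave td_io_queue() applying the
same guard on both the FIO_Q_COMPLETED and FIO_Q_QUEUED paths (a sketch
of just that guard; the surrounding queueing logic is unchanged):

    /*
     * Always count regular I/O, but count syncs only when they are not
     * part of the end-of-run pass (runstate TD_FSYNCING), so the final
     * end_fsync/fsync_on_close syncs no longer inflate the totals.
     */
    if (ddir_rw(io_u->ddir) ||
        (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING))
            td->ts.total_io_u[io_u->ddir]++;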