X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=c5b85aeb8c88ca2ef732135fc74ffb8d789325c1;hp=dbea661d961ca5b911b0a35161510c238d6f0b2a;hb=cb5ab5121ac4fa62e0ca2612b359f19bfdd30f29;hpb=0904200b6f113233a28c4c1ddf68f216bd922b78

diff --git a/fio.c b/fio.c
index dbea661d..c5b85aeb 100644
--- a/fio.c
+++ b/fio.c
@@ -159,7 +159,7 @@ static void cleanup_pending_aio(struct thread_data *td)
 	/*
 	 * get immediately available events, if any
 	 */
-	r = io_u_queued_complete(td, 0, NULL);
+	r = io_u_queued_complete(td, 0);
 	if (r < 0)
 		return;
 
@@ -187,7 +187,7 @@ static void cleanup_pending_aio(struct thread_data *td)
 	}
 
 	if (td->cur_depth)
-		r = io_u_queued_complete(td, td->cur_depth, NULL);
+		r = io_u_queued_complete(td, td->cur_depth);
 }
 
 /*
@@ -217,7 +217,7 @@ requeue:
 		put_io_u(td, io_u);
 		return 1;
 	} else if (ret == FIO_Q_QUEUED) {
-		if (io_u_queued_complete(td, 1, NULL) < 0)
+		if (io_u_queued_complete(td, 1) < 0)
 			return 1;
 	} else if (ret == FIO_Q_COMPLETED) {
 		if (io_u->error) {
@@ -225,7 +225,7 @@ requeue:
 			return 1;
 		}
 
-		if (io_u_sync_complete(td, io_u, NULL) < 0)
+		if (io_u_sync_complete(td, io_u) < 0)
 			return 1;
 	} else if (ret == FIO_Q_BUSY) {
 		if (td_io_commit(td))
@@ -282,6 +282,8 @@ static void do_verify(struct thread_data *td)
 			put_io_u(td, io_u);
 			break;
 		}
+
+		io_u->end_io = verify_io_u;
 requeue:
 		ret = td_io_queue(td, io_u);
 
@@ -296,7 +298,7 @@ requeue:
 				io_u->xfer_buf += bytes;
 				goto requeue;
 			}
-			ret = io_u_sync_complete(td, io_u, verify_io_u);
+			ret = io_u_sync_complete(td, io_u);
 			if (ret < 0)
 				break;
 			continue;
@@ -331,11 +333,16 @@ requeue:
 		 * Reap required number of io units, if any, and do the
 		 * verification on them through the callback handler
 		 */
-		if (io_u_queued_complete(td, min_events, verify_io_u) < 0)
+		if (io_u_queued_complete(td, min_events) < 0)
 			break;
 	}
 
-	if (td->cur_depth)
+	if (!td->error) {
+		min_events = td->cur_depth;
+
+		if (min_events)
+			ret = io_u_queued_complete(td, min_events);
+	} else
 		cleanup_pending_aio(td);
 
 	td_set_runstate(td, TD_RUNNING);
@@ -414,7 +421,7 @@ requeue:
 				goto requeue;
 			}
 			fio_gettime(&comp_time, NULL);
-			bytes_done = io_u_sync_complete(td, io_u, NULL);
+			bytes_done = io_u_sync_complete(td, io_u);
 			if (bytes_done < 0)
 				ret = bytes_done;
 			break;
@@ -426,6 +433,8 @@ requeue:
 			 */
 			if (td->io_ops->commit == NULL)
 				io_u_queued(td, io_u);
+			else if (td->io_u_queued >= td->iodepth_batch)
+				ret = td_io_commit(td);
 			break;
 		case FIO_Q_BUSY:
 			requeue_io_u(td, &io_u);
@@ -453,7 +462,7 @@ requeue:
 			}
 
 			fio_gettime(&comp_time, NULL);
-			bytes_done = io_u_queued_complete(td, min_evts, NULL);
+			bytes_done = io_u_queued_complete(td, min_evts);
 			if (bytes_done < 0)
 				break;
 		}
@@ -497,15 +506,17 @@ requeue:
 	if (!td->error) {
 		struct fio_file *f;
 
-		if (td->cur_depth)
-			cleanup_pending_aio(td);
+		i = td->cur_depth;
+		if (i)
+			ret = io_u_queued_complete(td, i);
 
 		if (should_fsync(td) && td->end_fsync) {
 			td_set_runstate(td, TD_FSYNCING);
 			for_each_file(td, f, i)
 				fio_io_sync(td, f);
 		}
-	}
+	} else
+		cleanup_pending_aio(td);
 }
 
 static void cleanup_io_u(struct thread_data *td)
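
The recurring change above — io_u_queued_complete() and io_u_sync_complete() losing their callback argument while do_verify() gains "io_u->end_io = verify_io_u" — moves completion handling from a per-call parameter to a per-request handler stored on the io_u itself. Below is a minimal standalone sketch of that dispatch pattern; the struct layout, the stub handler, and complete_io_u() are illustrative stand-ins, not fio's real definitions.

#include <stdio.h>

struct io_u;
typedef int (*end_io_fn)(struct io_u *);

/* Illustrative stand-in for fio's io_u; the real struct lives in fio.h. */
struct io_u {
	int error;		/* errno-style result of the I/O */
	end_io_fn end_io;	/* per-request completion handler, may be NULL */
};

/* Stand-in for verify_io_u(): check the payload, return 0 or an error. */
static int verify_io_u_stub(struct io_u *io_u)
{
	printf("verifying io_u %p\n", (void *)io_u);
	return 0;
}

/*
 * The completion path no longer needs to be told which handler to run;
 * it dispatches whatever the submitter stored in io_u->end_io.
 */
static int complete_io_u(struct io_u *io_u)
{
	if (!io_u->error && io_u->end_io)
		io_u->error = io_u->end_io(io_u);

	return io_u->error;
}

int main(void)
{
	struct io_u verify_req = { .error = 0, .end_io = verify_io_u_stub };
	struct io_u plain_req = { .error = 0, .end_io = NULL };

	printf("verify req -> %d\n", complete_io_u(&verify_req));
	printf("plain req  -> %d\n", complete_io_u(&plain_req));
	return 0;
}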
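
The new branch in do_io() — "else if (td->io_u_queued >= td->iodepth_batch) ret = td_io_commit(td);" — commits queued requests in batches for engines that expose a ->commit() hook, amortizing the submit cost across iodepth_batch io_us instead of paying it per request. Here is a self-contained sketch of that batching idea; the batch size, counter, and helper functions are hypothetical stand-ins for td->iodepth_batch, td->io_u_queued, and td_io_commit().

#include <stdio.h>

#define IODEPTH_BATCH 4	/* stand-in for td->iodepth_batch */

static int io_u_queued;	/* stand-in for td->io_u_queued */

/* Stand-in for an engine ->commit() hook: submit everything queued. */
static int commit_batch(void)
{
	printf("committing %d queued requests in one go\n", io_u_queued);
	io_u_queued = 0;
	return 0;
}

/* Queue one request; flush only once a full batch has built up. */
static int queue_one(int id)
{
	printf("queued request %d\n", id);
	io_u_queued++;

	if (io_u_queued >= IODEPTH_BATCH)
		return commit_batch();

	return 0;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		queue_one(i);

	/* End of run: flush any partial batch that remains. */
	if (io_u_queued)
		commit_batch();

	return 0;
}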
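
do_verify() and do_io() now also end with the same policy: a clean run drains the td->cur_depth outstanding requests through io_u_queued_complete(), so their results (and any end_io verification) are accounted for, while an errored run falls back to cleanup_pending_aio() and discards whatever is still pending. A hedged, self-contained sketch of that split follows; the struct and both helpers are stand-ins for the real fio code shown in the hunks above.

#include <stdio.h>

/* Minimal stand-in for fio state; not the real struct thread_data. */
struct thread_data {
	int error;	/* non-zero once the run has failed */
	int cur_depth;	/* number of requests still in flight */
};

/* Stand-in: wait for 'min' completions and account their results. */
static int io_u_queued_complete_stub(struct thread_data *td, int min)
{
	printf("draining %d in-flight requests, keeping results\n", min);
	td->cur_depth -= min;
	return 0;
}

/* Stand-in: reap and throw away whatever is still pending. */
static void cleanup_pending_aio_stub(struct thread_data *td)
{
	printf("error path: discarding %d pending requests\n", td->cur_depth);
	td->cur_depth = 0;
}

/* The shared end-of-loop policy: drain on success, clean up on error. */
static void end_of_run(struct thread_data *td)
{
	if (!td->error) {
		if (td->cur_depth)
			io_u_queued_complete_stub(td, td->cur_depth);
	} else
		cleanup_pending_aio_stub(td);
}

int main(void)
{
	struct thread_data ok = { .error = 0, .cur_depth = 8 };
	struct thread_data bad = { .error = 5, .cur_depth = 3 };

	end_of_run(&ok);
	end_of_run(&bad);
	return 0;
}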