X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=9a3cc94897d3c49928ef27f2dade7504c955f81a;hp=dbea661d961ca5b911b0a35161510c238d6f0b2a;hb=7c83c089a9b8bd742f788157ac47a5af363adb19;hpb=0904200b6f113233a28c4c1ddf68f216bd922b78

diff --git a/fio.c b/fio.c
index dbea661d..9a3cc948 100644
--- a/fio.c
+++ b/fio.c
@@ -100,6 +100,12 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 	unsigned long spent;
 	unsigned long rate;
 
+	/*
+	 * No minimum rate set, always ok
+	 */
+	if (!td->ratemin)
+		return 0;
+
 	/*
 	 * allow a 2 second settle period in the beginning
 	 */
@@ -159,7 +165,7 @@ static void cleanup_pending_aio(struct thread_data *td)
 	/*
 	 * get immediately available events, if any
 	 */
-	r = io_u_queued_complete(td, 0, NULL);
+	r = io_u_queued_complete(td, 0);
 	if (r < 0)
 		return;
 
@@ -187,7 +193,7 @@ static void cleanup_pending_aio(struct thread_data *td)
 	}
 
 	if (td->cur_depth)
-		r = io_u_queued_complete(td, td->cur_depth, NULL);
+		r = io_u_queued_complete(td, td->cur_depth);
 }
 
 /*
@@ -217,7 +223,7 @@ requeue:
 		put_io_u(td, io_u);
 		return 1;
 	} else if (ret == FIO_Q_QUEUED) {
-		if (io_u_queued_complete(td, 1, NULL) < 0)
+		if (io_u_queued_complete(td, 1) < 0)
 			return 1;
 	} else if (ret == FIO_Q_COMPLETED) {
 		if (io_u->error) {
@@ -225,7 +231,7 @@ requeue:
 			return 1;
 		}
 
-		if (io_u_sync_complete(td, io_u, NULL) < 0)
+		if (io_u_sync_complete(td, io_u) < 0)
 			return 1;
 	} else if (ret == FIO_Q_BUSY) {
 		if (td_io_commit(td))
@@ -282,6 +288,8 @@ static void do_verify(struct thread_data *td)
 			put_io_u(td, io_u);
 			break;
 		}
+
+		io_u->end_io = verify_io_u;
 requeue:
 		ret = td_io_queue(td, io_u);
 
@@ -296,7 +304,7 @@ requeue:
 				io_u->xfer_buf += bytes;
 				goto requeue;
 			}
-			ret = io_u_sync_complete(td, io_u, verify_io_u);
+			ret = io_u_sync_complete(td, io_u);
 			if (ret < 0)
 				break;
 			continue;
@@ -331,11 +339,16 @@ requeue:
 		 * Reap required number of io units, if any, and do the
 		 * verification on them through the callback handler
 		 */
-		if (io_u_queued_complete(td, min_events, verify_io_u) < 0)
+		if (io_u_queued_complete(td, min_events) < 0)
 			break;
 	}
 
-	if (td->cur_depth)
+	if (!td->error) {
+		min_events = td->cur_depth;
+
+		if (min_events)
+			ret = io_u_queued_complete(td, min_events);
+	} else
 		cleanup_pending_aio(td);
 
 	td_set_runstate(td, TD_RUNNING);
@@ -414,7 +427,7 @@ requeue:
 				goto requeue;
 			}
 			fio_gettime(&comp_time, NULL);
-			bytes_done = io_u_sync_complete(td, io_u, NULL);
+			bytes_done = io_u_sync_complete(td, io_u);
 			if (bytes_done < 0)
 				ret = bytes_done;
 			break;
@@ -453,7 +466,7 @@ requeue:
 			}
 
 			fio_gettime(&comp_time, NULL);
-			bytes_done = io_u_queued_complete(td, min_evts, NULL);
+			bytes_done = io_u_queued_complete(td, min_evts);
 			if (bytes_done < 0)
 				break;
 		}
@@ -497,15 +510,17 @@ requeue:
 	if (!td->error) {
 		struct fio_file *f;
 
-		if (td->cur_depth)
-			cleanup_pending_aio(td);
+		i = td->cur_depth;
+		if (i)
+			ret = io_u_queued_complete(td, i);
 
 		if (should_fsync(td) && td->end_fsync) {
 			td_set_runstate(td, TD_FSYNCING);
 			for_each_file(td, f, i)
 				fio_io_sync(td, f);
 		}
-	}
+	} else
+		cleanup_pending_aio(td);
 }
 
 static void cleanup_io_u(struct thread_data *td)
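
The patch above removes the completion-callback argument from io_u_sync_complete() and io_u_queued_complete(): instead of the caller threading verify_io_u through every completion call, do_verify() now attaches the handler to the io unit itself ("io_u->end_io = verify_io_u;") before queueing it, and the generic completion path invokes whatever handler is attached. Below is a minimal standalone sketch of that pattern, using simplified stand-in names (my_io_u, complete_one, verify_end_io) rather than fio's real definitions:

#include <stdio.h>

/*
 * Illustrative stand-in for fio's io unit (not the real struct io_u):
 * the completion handler travels with the unit instead of being
 * passed as an argument to the reaping functions.
 */
struct my_io_u {
	int error;				/* result of the io */
	int (*end_io)(struct my_io_u *io_u);	/* optional per-io callback */
};

/* Hypothetical verify handler, playing the role of verify_io_u(). */
static int verify_end_io(struct my_io_u *io_u)
{
	printf("verifying completed io_u, error=%d\n", io_u->error);
	return io_u->error;
}

/*
 * Generic completion path: no callback parameter needed, it simply
 * honors whatever end_io the submitter attached, if any.
 */
static int complete_one(struct my_io_u *io_u)
{
	if (io_u->end_io)
		return io_u->end_io(io_u);

	return io_u->error;
}

int main(void)
{
	struct my_io_u normal = { .error = 0, .end_io = NULL };
	struct my_io_u verify = { .error = 0, .end_io = verify_end_io };

	/* one completion call for both; behavior differs per io unit */
	complete_one(&normal);
	complete_one(&verify);
	return 0;
}

This is also what lets the end-of-run drain paths in do_io() and do_verify() use plain io_u_queued_complete() calls on the success path: completing the remaining td->cur_depth units runs any attached verify handlers automatically, while cleanup_pending_aio() is reserved for the error path.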