X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=fio.c;h=b095527e4d028d18ab30d185a7b25c4a93eba7ab;hp=920639395cbda1101ba57c36b7cc421f6a3d4f27;hb=bc5b77a8c46aabea554c4a2c8cca37f27f97969a;hpb=e1161c325f7866bae879e686d1c673ca32ab09ae

diff --git a/fio.c b/fio.c
index 92063939..b095527e 100644
--- a/fio.c
+++ b/fio.c
@@ -96,9 +96,9 @@ static void sig_handler(int sig)
  */
 static int check_min_rate(struct thread_data *td, struct timeval *now)
 {
+	unsigned long long bytes = 0;
 	unsigned long spent;
 	unsigned long rate;
-	int ddir = td->ddir;
 
 	/*
 	 * allow a 2 second settle period in the beginning
@@ -106,6 +106,11 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 	if (mtime_since(&td->start, now) < 2000)
 		return 0;
 
+	if (td_read(td))
+		bytes += td->this_io_bytes[DDIR_READ];
+	if (td_write(td))
+		bytes += td->this_io_bytes[DDIR_WRITE];
+
 	/*
 	 * if rate blocks is set, sample is running
 	 */
@@ -114,14 +119,19 @@ static int check_min_rate(struct thread_data *td, struct timeval *now)
 		if (spent < td->ratecycle)
 			return 0;
 
-		rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
-		if (rate < td->ratemin) {
-			fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+		if (bytes < td->rate_bytes) {
+			fprintf(f_out, "%s: min rate %u not met\n", td->name, td->ratemin);
 			return 1;
+		} else {
+			rate = (bytes - td->rate_bytes) / spent;
+			if (rate < td->ratemin || bytes < td->rate_bytes) {
+				fprintf(f_out, "%s: min rate %u not met, got %luKiB/sec\n", td->name, td->ratemin, rate);
+				return 1;
+			}
 		}
 	}
 
-	td->rate_bytes = td->this_io_bytes[ddir];
+	td->rate_bytes = bytes;
 	memcpy(&td->lastrate, now, sizeof(*now));
 	return 0;
 }
@@ -149,7 +159,7 @@ static void cleanup_pending_aio(struct thread_data *td)
 	/*
 	 * get immediately available events, if any
 	 */
-	r = io_u_queued_complete(td, 0, NULL);
+	r = io_u_queued_complete(td, 0);
 	if (r < 0)
 		return;
 
@@ -177,7 +187,7 @@ static void cleanup_pending_aio(struct thread_data *td)
 	}
 
 	if (td->cur_depth)
-		r = io_u_queued_complete(td, td->cur_depth, NULL);
+		r = io_u_queued_complete(td, td->cur_depth);
 }
 
 /*
@@ -207,7 +217,7 @@ requeue:
 		put_io_u(td, io_u);
 		return 1;
 	} else if (ret == FIO_Q_QUEUED) {
-		if (io_u_queued_complete(td, 1, NULL) < 0)
+		if (io_u_queued_complete(td, 1) < 0)
 			return 1;
 	} else if (ret == FIO_Q_COMPLETED) {
 		if (io_u->error) {
@@ -215,7 +225,7 @@ requeue:
 			return 1;
 		}
 
-		if (io_u_sync_complete(td, io_u, NULL) < 0)
+		if (io_u_sync_complete(td, io_u) < 0)
 			return 1;
 	} else if (ret == FIO_Q_BUSY) {
 		if (td_io_commit(td))
@@ -272,6 +282,8 @@ static void do_verify(struct thread_data *td)
 			put_io_u(td, io_u);
 			break;
 		}
+
+		io_u->end_io = verify_io_u;
 requeue:
 		ret = td_io_queue(td, io_u);
 
@@ -286,7 +298,7 @@ requeue:
 				io_u->xfer_buf += bytes;
 				goto requeue;
 			}
-			ret = io_u_sync_complete(td, io_u, verify_io_u);
+			ret = io_u_sync_complete(td, io_u);
 			if (ret < 0)
 				break;
 			continue;
@@ -321,11 +333,16 @@ requeue:
 		 * Reap required number of io units, if any, and do the
 		 * verification on them through the callback handler
 		 */
-		if (io_u_queued_complete(td, min_events, verify_io_u) < 0)
+		if (io_u_queued_complete(td, min_events) < 0)
 			break;
 	}
 
-	if (td->cur_depth)
+	if (!td->error) {
+		min_events = td->cur_depth;
+
+		if (min_events)
+			ret = io_u_queued_complete(td, min_events);
+	} else
 		cleanup_pending_aio(td);
 
 	td_set_runstate(td, TD_RUNNING);
@@ -404,7 +421,7 @@ requeue:
 				goto requeue;
 			}
 			fio_gettime(&comp_time, NULL);
-			bytes_done = io_u_sync_complete(td, io_u, NULL);
+			bytes_done = io_u_sync_complete(td, io_u);
 			if (bytes_done < 0)
 				ret = bytes_done;
 			break;
@@ -443,7 +460,7 @@ requeue:
 			}
 
 			fio_gettime(&comp_time, NULL);
-			bytes_done = io_u_queued_complete(td, min_evts, NULL);
+			bytes_done = io_u_queued_complete(td, min_evts);
 			if (bytes_done < 0)
 				break;
 		}
@@ -458,7 +475,7 @@ requeue:
 		 */
 		usec = utime_since(&s, &comp_time);
 
-		rate_throttle(td, usec, bytes_done, td->ddir);
+		rate_throttle(td, usec, bytes_done);
 
 		if (check_min_rate(td, &comp_time)) {
 			if (exitall_on_terminate)
@@ -487,15 +504,17 @@ requeue:
 	if (!td->error) {
 		struct fio_file *f;
 
-		if (td->cur_depth)
-			cleanup_pending_aio(td);
+		i = td->cur_depth;
+		if (i)
+			ret = io_u_queued_complete(td, i);
 
 		if (should_fsync(td) && td->end_fsync) {
 			td_set_runstate(td, TD_FSYNCING);
 			for_each_file(td, f, i)
 				fio_io_sync(td, f);
 		}
-	}
+	} else
+		cleanup_pending_aio(td);
 }
 
 static void cleanup_io_u(struct thread_data *td)
@@ -736,10 +755,11 @@ static void *thread_main(void *data)
 		else
 			do_io(td);
 
-		runtime[td->ddir] += utime_since_now(&td->start);
-		if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
-			runtime[td->ddir ^ 1] = runtime[td->ddir];
-
+		if (td_read(td) && td->io_bytes[DDIR_READ])
+			runtime[DDIR_READ] += utime_since_now(&td->start);
+		if (td_write(td) && td->io_bytes[DDIR_WRITE])
+			runtime[DDIR_WRITE] += utime_since_now(&td->start);
+
 		if (td->error || td->terminate)
 			break;
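
The recurring change throughout this diff is that the verify callback moves off the argument lists of io_u_sync_complete() and io_u_queued_complete() and onto the io_u itself, via the io_u->end_io assignment added in do_verify(). Below is a minimal sketch (not fio source) of that completion model; verify_io_u and the end_io field are real names from the patch, while end_io_fn, complete_io_u, and the simplified struct layout are stand-ins for illustration only:

/*
 * Sketch of a per-request end_io hook, as this patch introduces.
 * Simplified types; fio's real io_u and thread_data carry much more state.
 */
struct io_u;
typedef int (*end_io_fn)(struct io_u *);

struct io_u {
	int error;			/* errno-style completion error */
	unsigned long xfer_buflen;	/* bytes transferred */
	end_io_fn end_io;		/* optional per-io_u completion hook */
};

/* stand-in for fio's verify_io_u(): check returned data, 0 on success */
static int verify_io_u(struct io_u *io_u)
{
	/* pattern/checksum verification of the buffer would go here */
	(void) io_u;
	return 0;
}

/*
 * One completion routine serves both the plain-I/O and the verify paths:
 * it runs whatever hook the io_u carries, so callers no longer pass a
 * callback (or NULL) at completion time.
 */
static long complete_io_u(struct io_u *io_u)
{
	if (io_u->error)
		return -io_u->error;

	if (io_u->end_io) {
		int ret = io_u->end_io(io_u);

		if (ret)
			return -ret;
	}

	return (long) io_u->xfer_buflen;	/* bytes done */
}

Under this model the verify pass sets the hook once when preparing the io_u, which is exactly the "io_u->end_io = verify_io_u;" line added in do_verify(); every completion path, sync or queued, then picks it up automatically.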
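A note on units in the reworked check_min_rate(): spent comes from mtime_since(), i.e. milliseconds, so rate = (bytes - td->rate_bytes) / spent is bytes per millisecond, numerically thousands of bytes per second, which is what the message labels KiB/sec. As a worked example with assumed values (not from the source): 10,000,000 bytes moved across a 2000 ms sampling window gives rate = 5000, so a job with ratemin=8000 fails the check and prints "min rate 8000 not met, got 5000KiB/sec". The new "bytes < td->rate_bytes" guard presumably protects that unsigned subtraction from underflowing when the per-loop byte counters are reset between iterations.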