X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=backend.c;h=936203dcb1c4858b1b3f7ddea23ab5f965e04bb4;hp=5f0740395bad36cd9d13c30bf6bb9d871805c785;hb=HEAD;hpb=0b47b2cf3dab1d26d72f52ed8c19f782a8277d3a

diff --git a/backend.c b/backend.c
index 5f074039..fe03eab3 100644
--- a/backend.c
+++ b/backend.c
@@ -49,6 +49,7 @@
 #include "helper_thread.h"
 #include "pshared.h"
 #include "zone-dist.h"
+#include "fio_time.h"
 
 static struct fio_sem *startup_sem;
 static struct flist_head *cgroup_list;
@@ -976,6 +977,11 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
 	 */
 	if (td_write(td) && td_random(td) && td->o.norandommap)
 		total_bytes = max(total_bytes, (uint64_t) td->o.io_size);
+
+	/* Don't break too early if io_size > size */
+	if (td_rw(td) && !td_random(td))
+		total_bytes = max(total_bytes, (uint64_t)td->o.io_size);
+
 	/*
 	 * If verify_backlog is enabled, we'll run the verify in this
 	 * handler as well. For that case, we may need up to twice the
@@ -1133,6 +1139,9 @@ reap:
 		if (ret < 0)
 			break;
 
+		if (ddir_rw(ddir) && td->o.thinkcycles)
+			cycles_spin(td->o.thinkcycles);
+
 		if (ddir_rw(ddir) && td->o.thinktime)
 			handle_thinktime(td, ddir, &comp_time);
 
@@ -1329,7 +1338,7 @@ static int init_io_u(struct thread_data *td)
 int init_io_u_buffers(struct thread_data *td)
 {
 	struct io_u *io_u;
-	unsigned long long max_bs, min_write;
+	unsigned long long max_bs, min_write, trim_bs = 0;
 	int i, max_units;
 	int data_xfer = 1;
 	char *p;
@@ -1340,7 +1349,18 @@ int init_io_u_buffers(struct thread_data *td)
 	td->orig_buffer_size = (unsigned long long) max_bs
 					* (unsigned long long) max_units;
 
-	if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
+	if (td_trim(td) && td->o.num_range > 1) {
+		trim_bs = td->o.num_range * sizeof(struct trim_range);
+		td->orig_buffer_size = trim_bs
+					* (unsigned long long) max_units;
+	}
+
+	/*
+	 * For reads, writes, and multi-range trim operations we need a
+	 * data buffer
+	 */
+	if (td_ioengine_flagged(td, FIO_NOIO) ||
+	    !(td_read(td) || td_write(td) || (td_trim(td) && td->o.num_range > 1)))
 		data_xfer = 0;
 
 	/*
@@ -1392,7 +1412,10 @@ int init_io_u_buffers(struct thread_data *td)
 				fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
 			}
 		}
-		p += max_bs;
+		if (td_trim(td) && td->o.num_range > 1)
+			p += trim_bs;
+		else
+			p += max_bs;
 	}
 
 	return 0;
@@ -1858,7 +1881,7 @@ static void *thread_main(void *data)
 	if (rate_submit_init(td, sk_out))
 		goto err;
 
-	set_epoch_time(td, o->log_unix_epoch | o->log_alternate_epoch, o->log_alternate_epoch_clock_id);
+	set_epoch_time(td, o->log_alternate_epoch_clock_id, o->job_start_clock_id);
 	fio_getrusage(&td->ru_start);
 	memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch));
 	memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch));
@@ -2076,14 +2099,14 @@ static void reap_threads(unsigned int *nr_running, uint64_t *t_rate,
 			 uint64_t *m_rate)
 {
 	unsigned int cputhreads, realthreads, pending;
-	int status, ret;
+	int ret;
 
 	/*
 	 * reap exited threads (TD_EXITED -> TD_REAPED)
 	 */
 	realthreads = pending = cputhreads = 0;
 	for_each_td(td) {
-		int flags = 0;
+		int flags = 0, status;
 
 		if (!strcmp(td->o.ioengine, "cpuio"))
 			cputhreads++;