diff --git a/io_u.c b/io_u.c
index 278d47a4..69edd70e 100644
--- a/io_u.c
+++ b/io_u.c
@@ -8,6 +8,7 @@
 #include "fio.h"
 #include "hash.h"
 #include "verify.h"
+#include "lib/rand.h"
 
 struct io_completion_data {
         int nr;                         /* input */
@@ -53,11 +54,8 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
                  * If we have a mixed random workload, we may
                  * encounter blocks we already did IO to.
                  */
-                if ((td->o.ddir_nr == 1) && !random_map_free(f, block)) {
-                        if (!blocks)
-                                blocks = 1;
+                if ((td->o.ddir_nr == 1) && !random_map_free(f, block))
                         break;
-                }
 
                 idx = RAND_MAP_IDX(f, block);
                 bit = RAND_MAP_BIT(f, block);
@@ -68,10 +66,20 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
                 if (this_blocks + bit > BLOCKS_PER_MAP)
                         this_blocks = BLOCKS_PER_MAP - bit;
 
-                if (this_blocks == BLOCKS_PER_MAP)
-                        mask = -1U;
-                else
-                        mask = ((1U << this_blocks) - 1) << bit;
+                do {
+                        if (this_blocks == BLOCKS_PER_MAP)
+                                mask = -1U;
+                        else
+                                mask = ((1U << this_blocks) - 1) << bit;
+
+                        if (!(f->file_map[idx] & mask))
+                                break;
+
+                        this_blocks--;
+                } while (this_blocks);
+
+                if (!this_blocks)
+                        break;
 
                 f->file_map[idx] |= mask;
                 nr_blocks -= this_blocks;
@@ -233,8 +241,10 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
 
 static int get_next_offset(struct thread_data *td, struct io_u *io_u)
 {
-        if (td->fill_io_u_off)
-                return td->fill_io_u_off(td, io_u);
+        struct prof_io_ops *ops = &td->prof_io_ops;
+
+        if (ops->fill_io_u_off)
+                return ops->fill_io_u_off(td, io_u);
 
         return __get_next_offset(td, io_u);
 }
@@ -286,8 +296,10 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 
 static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
 {
-        if (td->fill_io_u_size)
-                return td->fill_io_u_size(td, io_u);
+        struct prof_io_ops *ops = &td->prof_io_ops;
+
+        if (ops->fill_io_u_size)
+                return ops->fill_io_u_size(td, io_u);
 
         return __get_next_buflen(td, io_u);
 }
@@ -390,6 +402,14 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
             td->io_issues[DDIR_WRITE] && should_fsync(td))
                 return DDIR_DATASYNC;
 
+        /*
+         * see if it's time to sync_file_range
+         */
+        if (td->sync_file_range_nr &&
+            !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
+            td->io_issues[DDIR_WRITE] && should_fsync(td))
+                return DDIR_SYNC_FILE_RANGE;
+
         if (td_rw(td)) {
                 /*
                  * Check if it's time to seed a new data direction.
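
The do/while loop added to mark_random_map() is the subtle part of this hunk: rather than OR-ing the mask in unconditionally, it shrinks the candidate run of blocks until the mask no longer overlaps blocks that already saw IO. Below is a minimal standalone sketch of that backoff under simplifying assumptions: a single 32-bit map word, with mark_blocks() and the demo values being hypothetical, whereas the real code walks f->file_map via RAND_MAP_IDX/RAND_MAP_BIT.

#include <stdio.h>

#define BLOCKS_PER_MAP 32

/*
 * Try to mark this_blocks blocks starting at 'bit' in *map_word.
 * Shrink the run until its mask is free of already-marked blocks,
 * then mark it. Returns how many blocks were actually marked.
 */
static unsigned int mark_blocks(unsigned int *map_word, unsigned int bit,
                                unsigned int this_blocks)
{
        unsigned int mask;

        do {
                if (this_blocks == BLOCKS_PER_MAP)
                        mask = -1U;
                else
                        mask = ((1U << this_blocks) - 1) << bit;

                /* no overlap with blocks we already did IO to? */
                if (!(*map_word & mask))
                        break;

                this_blocks--;
        } while (this_blocks);

        if (this_blocks)
                *map_word |= mask;

        return this_blocks;
}

int main(void)
{
        unsigned int word = 1U << 6;    /* block 6 already has IO */
        unsigned int marked = mark_blocks(&word, 4, 8);

        /* the run 4..11 collides at block 6, so only 4..5 get marked */
        printf("marked %u blocks, map=0x%x\n", marked, word);
        return 0;
}

A return of 0 corresponds to the new "if (!this_blocks) break;" above: even the first block of the run was already marked, so the caller stops instead of double-counting it.
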
@@ -785,7 +805,7 @@ static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
         return f;
 }
 
-static struct fio_file *get_next_file(struct thread_data *td)
+static struct fio_file *__get_next_file(struct thread_data *td)
 {
         struct fio_file *f;
 
@@ -820,6 +840,16 @@ out:
         return f;
 }
 
+static struct fio_file *get_next_file(struct thread_data *td)
+{
+        struct prof_io_ops *ops = &td->prof_io_ops;
+
+        if (ops->get_next_file)
+                return ops->get_next_file(td);
+
+        return __get_next_file(td);
+}
+
 static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
 {
         struct fio_file *f;
@@ -866,14 +896,6 @@ again:
                 io_u->end_io = NULL;
         }
 
-        /*
-         * We ran out, wait for async verify threads to finish and return one
-         */
-        if (!io_u && td->o.verify_async) {
-                pthread_cond_wait(&td->free_cond, &td->io_u_lock);
-                goto again;
-        }
-
         if (io_u) {
                 assert(io_u->flags & IO_U_F_FREE);
                 io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
@@ -883,6 +905,13 @@ again:
                 flist_add(&io_u->list, &td->io_u_busylist);
                 td->cur_depth++;
                 io_u->flags |= IO_U_F_IN_CUR_DEPTH;
+        } else if (td->o.verify_async) {
+                /*
+                 * We ran out, wait for async verify threads to finish and
+                 * return one
+                 */
+                pthread_cond_wait(&td->free_cond, &td->io_u_lock);
+                goto again;
         }
 
         td_io_u_unlock(td);
@@ -904,6 +933,24 @@ struct io_u *get_io_u(struct thread_data *td)
                 return NULL;
         }
 
+        if (td->o.verify_backlog && td->io_hist_len) {
+                int get_verify = 0;
+
+                if (td->verify_batch) {
+                        td->verify_batch--;
+                        get_verify = 1;
+                } else if (!(td->io_hist_len % td->o.verify_backlog) &&
+                           td->last_ddir != DDIR_READ) {
+                        td->verify_batch = td->o.verify_batch;
+                        if (!td->verify_batch)
+                                td->verify_batch = td->o.verify_backlog;
+                        get_verify = 1;
+                }
+
+                if (get_verify && !get_next_verify(td, io_u))
+                        goto out;
+        }
+
         /*
          * from a requeue, io_u already setup
          */
@@ -982,6 +1029,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
          * initialized, silence that warning.
          */
         unsigned long uninitialized_var(usec);
+        struct fio_file *f;
 
         dprint_io_u(io_u, "io complete");
 
@@ -992,10 +1040,16 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 
         if (ddir_sync(io_u->ddir)) {
                 td->last_was_sync = 1;
+                f = io_u->file;
+                if (f) {
+                        f->first_write = -1ULL;
+                        f->last_write = -1ULL;
+                }
                 return;
         }
 
         td->last_was_sync = 0;
+        td->last_ddir = io_u->ddir;
 
         if (!io_u->error) {
                 unsigned int bytes = io_u->buflen - io_u->resid;
@@ -1007,6 +1061,18 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
                 td->io_bytes[idx] += bytes;
                 td->this_io_bytes[idx] += bytes;
 
+                if (idx == DDIR_WRITE) {
+                        f = io_u->file;
+                        if (f) {
+                                if (f->first_write == -1ULL ||
+                                    io_u->offset < f->first_write)
+                                        f->first_write = io_u->offset;
+                                if (f->last_write == -1ULL ||
+                                    ((io_u->offset + bytes) > f->last_write))
+                                        f->last_write = io_u->offset + bytes;
+                        }
+                }
+
                 if (ramp_time_over(td)) {
                         unsigned long uninitialized_var(lusec);
 
@@ -1174,13 +1240,8 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
 void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
                       unsigned int max_bs)
 {
-        long *ptr = io_u->buf;
-
-        if (!td->o.zero_buffers) {
-                while ((void *) ptr - io_u->buf < max_bs) {
-                        *ptr = rand() * GOLDEN_RATIO_PRIME;
-                        ptr++;
-                }
-        } else
-                memset(ptr, 0, max_bs);
+        if (!td->o.zero_buffers)
+                fill_random_buf(io_u->buf, max_bs);
+        else
+                memset(io_u->buf, 0, max_bs);
 }
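
The new first_write/last_write fields give each file a running dirty range: completed writes widen it in io_completed(), and a sync completion resets both ends to -1ULL. Here is a hedged sketch of how such a range can be fed to sync_file_range(2), the system call behind the DDIR_SYNC_FILE_RANGE direction added above; struct write_range, track_write() and sync_written_range() are hypothetical names, and the engine-side consumer of these fields is not part of this diff.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>

struct write_range {
        uint64_t first_write;   /* lowest offset written since last sync */
        uint64_t last_write;    /* one past the highest byte written */
};

/* mirror of the bookkeeping io_completed() does for DDIR_WRITE */
static void track_write(struct write_range *r, uint64_t off, uint64_t bytes)
{
        if (r->first_write == -1ULL || off < r->first_write)
                r->first_write = off;
        if (r->last_write == -1ULL || off + bytes > r->last_write)
                r->last_write = off + bytes;
}

/* flush only the span dirtied since the previous sync, then reset it */
static int sync_written_range(int fd, struct write_range *r)
{
        int ret = 0;

        if (r->first_write != -1ULL && r->last_write != -1ULL)
                ret = sync_file_range(fd, r->first_write,
                                      r->last_write - r->first_write,
                                      SYNC_FILE_RANGE_WRITE);

        /* empty the range again, as io_completed() does on sync */
        r->first_write = r->last_write = -1ULL;
        return ret;
}

With sync_file_range_nr set, get_rw_ddir() returns DDIR_SYNC_FILE_RANGE every sync_file_range_nr writes, so a scheme like the above would flush exactly the bytes written since the last sync rather than the whole file.
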