X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=985c99b2bb7f1094ee28804bfcef060987cb3446;hp=7a23d2cf7fdd4a9127ec09298b8548cbca60588c;hb=aec2de209564a6951e6c91d653fc99a75508607d;hpb=47d85e2bd4763204a983c7bf4833c0d466dc36ca diff --git a/io_u.c b/io_u.c index 7a23d2cf..985c99b2 100644 --- a/io_u.c +++ b/io_u.c @@ -24,11 +24,10 @@ struct io_completion_data { * The ->file_map[] contains a map of blocks we have or have not done io * to yet. Used to make sure we cover the entire range in a fair fashion. */ -static int random_map_free(struct thread_data *td, struct fio_file *f, - const unsigned long long block) +static int random_map_free(struct fio_file *f, const unsigned long long block) { - unsigned int idx = RAND_MAP_IDX(td, f, block); - unsigned int bit = RAND_MAP_BIT(td, f, block); + unsigned int idx = RAND_MAP_IDX(f, block); + unsigned int bit = RAND_MAP_BIT(f, block); dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit); @@ -57,11 +56,11 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u) * If we have a mixed random workload, we may * encounter blocks we already did IO to. */ - if ((td->o.ddir_nr == 1) && !random_map_free(td, f, block)) + if ((td->o.ddir_nr == 1) && !random_map_free(f, block)) break; - idx = RAND_MAP_IDX(td, f, block); - bit = RAND_MAP_BIT(td, f, block); + idx = RAND_MAP_IDX(f, block); + bit = RAND_MAP_BIT(f, block); fio_assert(td, idx < f->num_maps); @@ -136,7 +135,7 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f, /* * calculate map offset and check if it's free */ - if (random_map_free(td, f, *b)) + if (random_map_free(f, *b)) return 0; dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", @@ -243,7 +242,7 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u) static void set_rwmix_bytes(struct thread_data *td) { - unsigned long long rbytes; + unsigned long issues; unsigned int diff; /* @@ -251,11 +250,11 @@ static void set_rwmix_bytes(struct thread_data *td) * buffered writes may issue a lot quicker than they complete, * whereas reads do not. */ - rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes; + issues = td->io_issues[td->rwmix_ddir] - td->rwmix_issues; diff = td->o.rwmix[td->rwmix_ddir ^ 1]; - td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] - + (rbytes * ((100 - diff)) / diff); + td->rwmix_issues = td->io_issues[td->rwmix_ddir] + + (issues * ((100 - diff)) / diff); } static inline enum fio_ddir get_rand_ddir(struct thread_data *td) @@ -279,25 +278,10 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td) static enum fio_ddir get_rw_ddir(struct thread_data *td) { if (td_rw(td)) { - struct timeval now; - unsigned long elapsed; - unsigned int cycle; - - fio_gettime(&now, NULL); - elapsed = mtime_since_now(&td->rwmix_switch); - - /* - * if this is the first cycle, make it shorter - */ - cycle = td->o.rwmixcycle; - if (!td->rwmix_bytes) - cycle /= 10; - /* * Check if it's time to seed a new data direction. 
*/ - if (elapsed >= cycle || - td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) { + if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) { unsigned long long max_bytes; enum fio_ddir ddir; @@ -310,11 +294,9 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td) max_bytes = td->this_io_bytes[ddir]; if (max_bytes >= (td->o.size * td->o.rwmix[ddir] / 100)) { - if (!td->rw_end_set[ddir]) { + if (!td->rw_end_set[ddir]) td->rw_end_set[ddir] = 1; - memcpy(&td->rw_end[ddir], &now, - sizeof(now)); - } + ddir ^= 1; } @@ -322,7 +304,6 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td) set_rwmix_bytes(td); td->rwmix_ddir = ddir; - memcpy(&td->rwmix_switch, &now, sizeof(now)); } return td->rwmix_ddir; } else if (td_read(td))
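
The hunks touching random_map_free(), mark_random_map() and get_next_rand_offset() are mechanical: the unused thread_data argument is dropped from random_map_free() and from the RAND_MAP_IDX()/RAND_MAP_BIT() macros, with no change to how the block map itself works. As background, the following is a minimal standalone sketch of that map, assuming the usual fio layout where each word of ->file_map[] covers BLOCKS_PER_MAP blocks; the real macros live in fio's headers, and the helper names below are made up for illustration only.

#include <limits.h>
#include <stdio.h>

/* One word of the map covers this many blocks (fio derives the same
 * figure from sizeof(long)); purely illustrative here. */
#define BLOCKS_PER_MAP	(sizeof(unsigned long) * CHAR_BIT)

/* Which word of file_map[] a block lands in. */
static unsigned int rand_map_idx(unsigned long long block)
{
	return block / BLOCKS_PER_MAP;
}

/* Which bit inside that word represents the block. */
static unsigned int rand_map_bit(unsigned long long block)
{
	return block % BLOCKS_PER_MAP;
}

/* A block is still free for random I/O while its bit is clear;
 * in fio it is mark_random_map() that sets the bit once I/O has
 * been issued to the block. */
static int random_map_free(const unsigned long *file_map,
			   unsigned long long block)
{
	return !(file_map[rand_map_idx(block)] &
		 (1UL << rand_map_bit(block)));
}

int main(void)
{
	unsigned long file_map[16] = { 0 };

	/* mark block 70 as done, then query it and a neighbour */
	file_map[rand_map_idx(70)] |= 1UL << rand_map_bit(70);

	printf("block 69 free: %d\n", random_map_free(file_map, 69));
	printf("block 70 free: %d\n", random_map_free(file_map, 70));
	return 0;
}

get_next_rand_offset() performs exactly this kind of check ("calculate map offset and check if it's free") to skip offsets the job has already covered, which is why only the function signatures change in those hunks.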
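
The substantive change is in the read/write mix handling: direction switching is no longer driven by elapsed time (rwmixcycle, rwmix_switch) or completed bytes, but by the number of I/Os issued in the current direction, tracked in rwmix_issues. The program below is a self-contained sketch of that logic, not fio code: the struct stands in for the thread_data fields involved, plain rand() replaces fio's own RNG behind get_rand_ddir(), the this_io_bytes/rw_end capping is left out, and the 75/25 mix is an assumed example.

#include <stdio.h>
#include <stdlib.h>

enum ddir { DDIR_READ = 0, DDIR_WRITE = 1 };

struct mix_state {
	unsigned long io_issues[2];	/* cumulative I/Os issued per direction */
	unsigned long rwmix_issues;	/* issue count that triggers the next switch */
	enum ddir rwmix_ddir;		/* direction currently being issued */
	unsigned int rwmix[2];		/* configured read/write percentages */
};

/*
 * Mirrors the reworked set_rwmix_bytes(): take the I/Os issued in the
 * current direction beyond the previous switch point, scale that
 * overshoot by the configured ratio between the two directions, and
 * push the switch threshold forward accordingly.
 */
static void set_rwmix_issues(struct mix_state *s)
{
	unsigned long issues = s->io_issues[s->rwmix_ddir] - s->rwmix_issues;
	unsigned int diff = s->rwmix[s->rwmix_ddir ^ 1];

	s->rwmix_issues = s->io_issues[s->rwmix_ddir] +
				(issues * (100 - diff)) / diff;
}

/*
 * Stand-in for get_rand_ddir(): pick a direction at random, weighted by
 * the configured mix.
 */
static enum ddir pick_rand_ddir(const struct mix_state *s)
{
	return ((unsigned int)rand() % 100) < s->rwmix[DDIR_READ] ?
		DDIR_READ : DDIR_WRITE;
}

/*
 * Mirrors the reduced get_rw_ddir(): the direction is only reconsidered
 * once the current one has reached rwmix_issues issued I/Os; no clock
 * reads or rwmix_switch timestamps are involved any more.
 */
static enum ddir next_ddir(struct mix_state *s)
{
	if (s->io_issues[s->rwmix_ddir] >= s->rwmix_issues) {
		enum ddir ddir = pick_rand_ddir(s);

		if (ddir != s->rwmix_ddir)
			set_rwmix_issues(s);

		s->rwmix_ddir = ddir;
	}

	return s->rwmix_ddir;
}

int main(void)
{
	struct mix_state s = { .rwmix = { 75, 25 } };
	int i;

	for (i = 0; i < 100000; i++)
		s.io_issues[next_ddir(&s)]++;

	printf("issued: reads=%lu writes=%lu (final threshold=%lu)\n",
	       s.io_issues[DDIR_READ], s.io_issues[DDIR_WRITE],
	       s.rwmix_issues);
	return 0;
}

The practical effect visible in the diff is that get_rw_ddir() no longer calls fio_gettime() on every invocation, and the rwmix_switch and rw_end timestamp copies disappear from the switch path; the decision now rests entirely on counters that are already incremented as each I/O is issued.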