X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=io_u.c;h=fd25dfe704423dc989ef36df741efab67283d609;hb=4cf6b5d30eebf225391d6d6ccbf2f7aa3994a0ca;hp=647751385e35c613ab821a5a2fe7d61aa7a924cd;hpb=2dc1bbeb58edc85f2829eed6729862c438ea2353;p=fio.git

diff --git a/io_u.c b/io_u.c
index 64775138..fd25dfe7 100644
--- a/io_u.c
+++ b/io_u.c
@@ -169,6 +169,35 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
 	return buflen;
 }
 
+static void set_rwmix_bytes(struct thread_data *td)
+{
+	unsigned long long rbytes;
+	unsigned int diff;
+
+	/*
+	 * we do time or byte based switch. this is needed because
+	 * buffered writes may issue a lot quicker than they complete,
+	 * whereas reads do not.
+	 */
+	rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes;
+	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
+
+	td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] + (rbytes * ((100 - diff)) / diff);
+}
+
+static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
+{
+	unsigned int v;
+	long r;
+
+	r = os_random_long(&td->rwmix_state);
+	v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
+	if (v < td->o.rwmix[DDIR_READ])
+		return DDIR_READ;
+
+	return DDIR_WRITE;
+}
+
 /*
  * Return the data direction for the next io_u. If the job is a
  * mixed read/write workload, check the rwmix cycle and switch if
@@ -179,23 +208,45 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
 	if (td_rw(td)) {
 		struct timeval now;
 		unsigned long elapsed;
+		unsigned int cycle;
 
 		fio_gettime(&now, NULL);
 		elapsed = mtime_since_now(&td->rwmix_switch);
 
+		/*
+		 * if this is the first cycle, make it shorter
+		 */
+		cycle = td->o.rwmixcycle;
+		if (!td->rwmix_bytes)
+			cycle /= 10;
+
 		/*
 		 * Check if it's time to seed a new data direction.
 		 */
-		if (elapsed >= td->o.rwmixcycle) {
-			unsigned int v;
-			long r;
-
-			r = os_random_long(&td->rwmix_state);
-			v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
-			if (v < td->o.rwmixread)
-				td->rwmix_ddir = DDIR_READ;
-			else
-				td->rwmix_ddir = DDIR_WRITE;
+		if (elapsed >= cycle ||
+		    td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) {
+			unsigned long long max_bytes;
+			enum fio_ddir ddir;
+
+			/*
+			 * Put a top limit on how many bytes we do for
+			 * one data direction, to avoid overflowing the
+			 * ranges too much
+			 */
+			ddir = get_rand_ddir(td);
+			max_bytes = td->this_io_bytes[ddir];
+			if (max_bytes >= (td->io_size * td->o.rwmix[ddir] / 100)) {
+				if (!td->rw_end_set[ddir]) {
+					td->rw_end_set[ddir] = 1;
+					memcpy(&td->rw_end[ddir], &now, sizeof(now));
+				}
+				ddir ^= 1;
+			}
+
+			if (ddir != td->rwmix_ddir)
+				set_rwmix_bytes(td);
+
+			td->rwmix_ddir = ddir;
 			memcpy(&td->rwmix_switch, &now, sizeof(now));
 		}
 		return td->rwmix_ddir;
@@ -754,7 +805,7 @@ static void io_u_timeout_handler(int fio_unused sig)
 		return;
 	}
 
-	log_err("fio: io_u timeout: job=%s, pid=%d\n", td->name, td->pid);
+	log_err("fio: io_u timeout: job=%s, pid=%d\n", td->o.name, td->pid);
 
 	list_for_each(entry, &td->io_u_busylist) {
 		io_u = list_entry(entry, struct io_u, list);
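
What the patch changes: get_rw_ddir() used to re-roll the data direction of a
mixed job on a purely time-based cycle (rwmixcycle). Buffered writes can be
issued much faster than they complete, while reads cannot, so a timer alone
lets one direction run well past its mix percentage; the patch adds a
byte-based trigger next to the timer and shortens the very first cycle by a
factor of ten. The random pick itself moves into the get_rand_ddir() helper:
scale a random long uniformly onto 1..100 and return DDIR_READ when it falls
under the read percentage. A minimal standalone sketch of that scaling, with
the C library rand() standing in for fio's os_random_long()/rwmix_state and a
plain parameter standing in for td->o.rwmix[DDIR_READ]:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

enum sketch_ddir { SKETCH_READ, SKETCH_WRITE };

/*
 * Same 1..100 scaling as get_rand_ddir() in the patch; rand() and
 * rwmix_read_pct are stand-ins for fio's RNG state and option field.
 */
static enum sketch_ddir pick_ddir(unsigned int rwmix_read_pct)
{
	long r = rand();
	unsigned int v = 1 + (unsigned int) (100.0 * (r / (RAND_MAX + 1.0)));

	if (v < rwmix_read_pct)
		return SKETCH_READ;

	return SKETCH_WRITE;
}

int main(void)
{
	unsigned int i, reads = 0;

	srand((unsigned int) time(NULL));
	for (i = 0; i < 100000; i++)
		if (pick_ddir(80) == SKETCH_READ)
			reads++;

	/* with an 80/20 mix, expect roughly 80% of picks to be reads */
	printf("reads: %u of 100000\n", reads);
	return 0;
}

Built with cc -o pick pick.c, the count lands near 80% of the iterations
(79% exactly, since v < 80 admits 1..79 of the 100 equally likely values;
the sketch keeps the patch's strict comparison).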
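set_rwmix_bytes() is what arms the byte trigger when the direction flips. As I
read it: rbytes is what the outgoing direction transferred since the last
mark, diff is the mix percentage of the incoming direction, and the next mark
is placed at io_bytes + rbytes * (100 - diff) / diff, i.e. the last cycle's
volume scaled by the ratio of the two percentages. A worked example with
made-up numbers for an 80/20 read/write job at the moment it switches away
from reads (the variable names mirror the patch, the harness itself is
hypothetical):

#include <stdio.h>

int main(void)
{
	/*
	 * Invented snapshot: io_bytes_read plays td->io_bytes[td->rwmix_ddir],
	 * prev_mark plays the old td->rwmix_bytes, and diff is
	 * td->o.rwmix[td->rwmix_ddir ^ 1], the incoming (write) percentage.
	 */
	unsigned long long io_bytes_read = 96ULL << 20;	/* 96 MiB read so far */
	unsigned long long prev_mark = 32ULL << 20;	/* previous rwmix_bytes */
	unsigned int diff = 20;				/* write share of the mix */

	unsigned long long rbytes = io_bytes_read - prev_mark;
	unsigned long long next_mark = io_bytes_read +
					(rbytes * (100 - diff) / diff);

	printf("rbytes=%llu MiB, next mark=%llu MiB\n",
	       rbytes >> 20, next_mark >> 20);
	return 0;
}

With diff = 20 the scale factor is (100 - 20) / 20 = 4, so a 64 MiB read cycle
pushes the next mark 256 MiB out (to 352 MiB); the job then re-evaluates its
direction as soon as either the full-length time cycle elapses or the mark is
crossed, whichever comes first.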
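The remaining addition is a hard cap on top of the random pick: if the chosen
direction has already transferred its full share of the job, that is
td->io_size * td->o.rwmix[ddir] / 100 bytes, the direction is flipped and the
moment it hit that share is recorded once in td->rw_end[] for later runtime
accounting. Note that rwmixread/rwmixwrite are now read through the two-entry
td->o.rwmix[] array, which is what makes the ddir ^ 1 indexing work. A reduced
sketch of just the cap check (the function and its argument list are invented
for illustration; the comparison mirrors the patch):

#include <stdio.h>

/*
 * The ^= 1 flip works because fio defines DDIR_READ as 0 and
 * DDIR_WRITE as 1.
 */
static int cap_ddir(int ddir, const unsigned long long this_io_bytes[2],
		    unsigned long long io_size, const unsigned int rwmix[2])
{
	/* has this direction already done its full share of the job? */
	if (this_io_bytes[ddir] >= io_size * rwmix[ddir] / 100)
		ddir ^= 1;

	return ddir;
}

int main(void)
{
	unsigned long long done[2] = { 820ULL << 20, 10ULL << 20 };
	unsigned int mix[2] = { 80, 20 };

	/* reads (ddir 0) are past their 80% share (~819.2 MiB) of a 1 GiB job */
	printf("capped ddir: %d\n", cap_ddir(0, done, 1024ULL << 20, mix));
	return 0;
}

The lone hunk at the bottom belongs to the same cleanup as the td->o.rwmix[]
references above: an earlier commit moved per-job options into td->o (fio's
thread_options struct), and the removed lines here already read
td->o.rwmixcycle, so the final hunk just converts a leftover td->name to
td->o.name in the timeout handler's error message.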