X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=9c8ff2638df5ed3ea57bb943f04672a7b8f63077;hp=8f0cdff718e70267c5a330b7aba9f439c09847ca;hb=36690c9b5b50736cec1fcd53100f01fd1181fdf4;hpb=4981d2de251698ebbe78451bfe4615e0d09a5296

diff --git a/io_u.c b/io_u.c
index 8f0cdff7..9c8ff263 100644
--- a/io_u.c
+++ b/io_u.c
@@ -39,7 +39,7 @@ static int random_map_free(struct thread_data *td, struct fio_file *f,
  */
 static void mark_random_map(struct thread_data *td, struct io_u *io_u)
 {
-	unsigned int min_bs = td->rw_min_bs;
+	unsigned int min_bs = td->o.rw_min_bs;
 	struct fio_file *f = io_u->file;
 	unsigned long long block;
 	unsigned int blocks;
@@ -52,7 +52,11 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
 	while (blocks < nr_blocks) {
 		unsigned int idx, bit;
 
-		if (!random_map_free(td, f, block))
+		/*
+		 * If we have a mixed random workload, we may
+		 * encounter blocks we already did IO to.
+		 */
+		if (!td->o.ddir_nr && !random_map_free(td, f, block))
 			break;
 
 		idx = RAND_MAP_IDX(td, f, block);
@@ -79,7 +83,7 @@ static int get_next_free_block(struct thread_data *td, struct fio_file *f,
 
 	i = f->last_free_lookup;
 	*b = (i * BLOCKS_PER_MAP);
-	while ((*b) * td->rw_min_bs < f->real_file_size) {
+	while ((*b) * td->o.rw_min_bs < f->real_file_size) {
 		if (f->file_map[i] != -1UL) {
 			*b += ffz(f->file_map[i]);
 			f->last_free_lookup = i;
@@ -93,6 +97,35 @@ static int get_next_free_block(struct thread_data *td, struct fio_file *f,
 	return 1;
 }
 
+static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
+				int ddir, unsigned long long *b)
+{
+	unsigned long long max_blocks = f->file_size / td->o.min_bs[ddir];
+	unsigned long long r, rb;
+	int loops = 5;
+
+	do {
+		r = os_random_long(&td->random_state);
+		if (!max_blocks)
+			*b = 0;
+		else
+			*b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));
+		if (td->o.norandommap)
+			break;
+		rb = *b + (f->file_offset / td->o.min_bs[ddir]);
+		loops--;
+	} while (!random_map_free(td, f, rb) && loops);
+
+	/*
+	 * if we failed to retrieve a truly random offset within
+	 * the loops assigned, see if there are free ones left at all
+	 */
+	if (!loops && get_next_free_block(td, f, b))
+		return 1;
+
+	return 0;
+}
+
 /*
  * For random io, generate a random new block and see if it's used. Repeat
  * until we find a free one. For sequential io, just return the end of
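The get_next_rand_offset() helper split out above combines two ideas: scaling a random long onto [0, max_blocks) without a modulo, and bounding the retries against the used-block map before falling back to a linear scan. Below is a minimal standalone sketch of that pattern; the bitmap and the pick_random_block() helper are illustrative stand-ins, not fio code.

#include <stdio.h>
#include <stdlib.h>

#define MAX_BLOCKS 1024ULL
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long used_map[MAX_BLOCKS / (8 * sizeof(unsigned long))];

static int block_is_free(unsigned long long b)
{
	return !(used_map[b / BITS_PER_LONG] & (1UL << (b % BITS_PER_LONG)));
}

static unsigned long long pick_random_block(unsigned long long max_blocks)
{
	unsigned long long b;
	int loops = 5;

	do {
		long r = random();

		/* scale r from [0, RAND_MAX] onto [0, max_blocks) */
		b = (max_blocks - 1) * r / (unsigned long long) (RAND_MAX + 1.0);
		loops--;
	} while (!block_is_free(b) && loops);

	/* fio would fall back to get_next_free_block() once loops hits 0 */
	return b;
}

int main(void)
{
	srandom(1);
	printf("picked block %llu\n", pick_random_block(MAX_BLOCKS));
	return 0;
}

Bounding the retries keeps offset generation cheap per IO even when the map is nearly full, at the cost of slightly less uniform picks late in a run.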
@@ -102,35 +135,17 @@ static int get_next_offset(struct thread_data *td, struct io_u *io_u)
 {
 	struct fio_file *f = io_u->file;
 	const int ddir = io_u->ddir;
-	unsigned long long b, rb;
-	long r;
+	unsigned long long b;
 
-	if (td_random(td)) {
-		unsigned long long max_blocks = f->file_size / td->min_bs[ddir];
-		int loops = 5;
+	if (td_random(td) && (td->o.ddir_nr && !--td->ddir_nr)) {
+		td->ddir_nr = td->o.ddir_nr;
 
-		if (!max_blocks)
-			return 1;
-
-		do {
-			r = os_random_long(&td->random_state);
-			b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));
-			if (td->norandommap)
-				break;
-			rb = b + (f->file_offset / td->min_bs[ddir]);
-			loops--;
-		} while (!random_map_free(td, f, rb) && loops);
-
-		/*
-		 * if we failed to retrieve a truly random offset within
-		 * the loops assigned, see if there are free ones left at all
-		 */
-		if (!loops && get_next_free_block(td, f, &b))
+		if (get_next_rand_offset(td, f, ddir, &b))
 			return 1;
 	} else
-		b = f->last_pos / td->min_bs[ddir];
+		b = f->last_pos / td->o.min_bs[ddir];
 
-	io_u->offset = (b * td->min_bs[ddir]) + f->file_offset;
+	io_u->offset = (b * td->o.min_bs[ddir]) + f->file_offset;
 	if (io_u->offset >= f->real_file_size)
 		return 1;
 
@@ -144,25 +159,60 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
 	unsigned int buflen;
 	long r;
 
-	if (td->min_bs[ddir] == td->max_bs[ddir])
-		buflen = td->min_bs[ddir];
+	if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
+		buflen = td->o.min_bs[ddir];
 	else {
 		r = os_random_long(&td->bsrange_state);
-		buflen = (unsigned int) (1 + (double) (td->max_bs[ddir] - 1) * r / (RAND_MAX + 1.0));
-		if (!td->bs_unaligned)
-			buflen = (buflen + td->min_bs[ddir] - 1) & ~(td->min_bs[ddir] - 1);
+		buflen = (unsigned int) (1 + (double) (td->o.max_bs[ddir] - 1) * r / (RAND_MAX + 1.0));
+		if (!td->o.bs_unaligned)
+			buflen = (buflen + td->o.min_bs[ddir] - 1) & ~(td->o.min_bs[ddir] - 1);
 	}
 
 	while (buflen + io_u->offset > f->real_file_size) {
-		if (buflen == td->min_bs[ddir])
+		if (buflen == td->o.min_bs[ddir]) {
+			if (!td->o.odirect) {
+				assert(io_u->offset <= f->real_file_size);
+				buflen = f->real_file_size - io_u->offset;
+				return buflen;
+			}
 			return 0;
+		}
 
-		buflen = td->min_bs[ddir];
+		buflen = td->o.min_bs[ddir];
 	}
 
 	return buflen;
 }
 
+static void set_rwmix_bytes(struct thread_data *td)
+{
+	unsigned long long rbytes;
+	unsigned int diff;
+
+	/*
+	 * we do time or byte based switch. this is needed because
+	 * buffered writes may issue a lot quicker than they complete,
+	 * whereas reads do not.
+	 */
+	rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes;
+	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
+
+	td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] + (rbytes * ((100 - diff)) / diff);
+}
+
+static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
+{
+	unsigned int v;
+	long r;
+
+	r = os_random_long(&td->rwmix_state);
+	v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
+	if (v < td->o.rwmix[DDIR_READ])
+		return DDIR_READ;
+
+	return DDIR_WRITE;
+}
+
 /*
  * Return the data direction for the next io_u. If the job is a
  * mixed read/write workload, check the rwmix cycle and switch if
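The byte quota computed by set_rwmix_bytes() above is easier to see in isolation. The sketch below reproduces the arithmetic with a hypothetical next_switch_bytes() standing in for the td fields: bytes done in the current direction since the last switch are scaled by the opposite direction's share of the mix.

#include <stdio.h>

/*
 * done_total:     bytes done so far in the current direction
 * done_at_switch: the same counter when the direction last switched
 * other_pct:      percentage of the mix owed to the other direction
 */
static unsigned long long next_switch_bytes(unsigned long long done_total,
					    unsigned long long done_at_switch,
					    unsigned int other_pct)
{
	unsigned long long since_switch = done_total - done_at_switch;

	/*
	 * 50/50 mix: allow the same amount again before switching;
	 * 75/25 mix (other_pct = 25): allow 3x as much, so the
	 * dominant direction keeps its share of the total.
	 */
	return done_total + since_switch * (100 - other_pct) / other_pct;
}

int main(void)
{
	/* 1 MB done since the last switch, 25% of the mix goes the other way */
	printf("switch at %llu bytes\n",
	       next_switch_bytes(4ULL << 20, 3ULL << 20, 25));
	return 0;
}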
@@ -173,23 +223,45 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
 	if (td_rw(td)) {
 		struct timeval now;
 		unsigned long elapsed;
+		unsigned int cycle;
 
 		fio_gettime(&now, NULL);
 		elapsed = mtime_since_now(&td->rwmix_switch);
 
+		/*
+		 * if this is the first cycle, make it shorter
+		 */
+		cycle = td->o.rwmixcycle;
+		if (!td->rwmix_bytes)
+			cycle /= 10;
+
 		/*
 		 * Check if it's time to seed a new data direction.
 		 */
-		if (elapsed >= td->rwmixcycle) {
-			unsigned int v;
-			long r;
-
-			r = os_random_long(&td->rwmix_state);
-			v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
-			if (v < td->rwmixread)
-				td->rwmix_ddir = DDIR_READ;
-			else
-				td->rwmix_ddir = DDIR_WRITE;
+		if (elapsed >= cycle ||
+		    td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) {
+			unsigned long long max_bytes;
+			enum fio_ddir ddir;
+
+			/*
+			 * Put a top limit on how many bytes we do for
+			 * one data direction, to avoid overflowing the
+			 * ranges too much
+			 */
+			ddir = get_rand_ddir(td);
+			max_bytes = td->this_io_bytes[ddir];
+			if (max_bytes >= (td->io_size * td->o.rwmix[ddir] / 100)) {
+				if (!td->rw_end_set[ddir]) {
+					td->rw_end_set[ddir] = 1;
+					memcpy(&td->rw_end[ddir], &now, sizeof(now));
+				}
+				ddir ^= 1;
+			}
+
+			if (ddir != td->rwmix_ddir)
+				set_rwmix_bytes(td);
+
+			td->rwmix_ddir = ddir;
 			memcpy(&td->rwmix_switch, &now, sizeof(now));
 		}
 		return td->rwmix_ddir;
@@ -228,14 +300,15 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	/*
 	 * If using an iolog, grab next piece if any available.
 	 */
-	if (td->read_iolog)
+	if (td->o.read_iolog)
 		return read_iolog_get(td, io_u);
 
 	/*
 	 * see if it's time to sync
 	 */
-	if (td->fsync_blocks && !(td->io_issues[DDIR_WRITE] % td->fsync_blocks)
-	    && td->io_issues[DDIR_WRITE] && should_fsync(td)) {
+	if (td->o.fsync_blocks &&
+	    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
+	    td->io_issues[DDIR_WRITE] && should_fsync(td)) {
 		io_u->ddir = DDIR_SYNC;
 		return 0;
 	}
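The fsync_blocks test in the hunk above fires once per N issued writes. A toy model of just that predicate, with a hypothetical should_sync() in place of fill_io_u()'s inline check:

#include <stdio.h>

/* sync when a positive multiple of fsync_blocks writes has been issued */
static int should_sync(unsigned long write_issues, unsigned int fsync_blocks)
{
	return fsync_blocks && write_issues &&
	       !(write_issues % fsync_blocks);
}

int main(void)
{
	for (unsigned long issued = 0; issued <= 10; issued++)
		if (should_sync(issued, 3))
			printf("sync after %lu writes\n", issued);
	return 0;
}

The write_issues guard mirrors the td->io_issues[DDIR_WRITE] term in the patch: without it, the very first io_u would become a sync, since 0 % N == 0.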
@@ -256,13 +329,13 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	/*
 	 * mark entry before potentially trimming io_u
 	 */
-	if (!td->read_iolog && td_random(td) && !td->norandommap)
+	if (!td->o.read_iolog && td_random(td) && !td->o.norandommap)
 		mark_random_map(td, io_u);
 
 	/*
 	 * If using a write iolog, store this entry.
 	 */
-	if (td->write_iolog_file)
+	if (td->o.write_iolog_file)
 		write_iolog_put(td, io_u);
 
 	return 0;
@@ -342,7 +415,7 @@ static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf,
 	do {
 		long r = os_random_long(&td->next_file_state);
 
-		fno = (unsigned int) ((double) td->nr_files * (r / (RAND_MAX + 1.0)));
+		fno = (unsigned int) ((double) td->o.nr_files * (r / (RAND_MAX + 1.0)));
 		f = &td->files[fno];
 
 		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
@@ -363,7 +436,7 @@ static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
 		f = &td->files[td->next_file];
 
 		td->next_file++;
-		if (td->next_file >= td->nr_files)
+		if (td->next_file >= td->o.nr_files)
 			td->next_file = 0;
 
 		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
@@ -379,7 +452,7 @@ static struct fio_file *get_next_file(struct thread_data *td)
 {
 	struct fio_file *f;
 
-	assert(td->nr_files <= td->files_index);
+	assert(td->o.nr_files <= td->files_index);
 
 	if (!td->nr_open_files)
 		return NULL;
@@ -388,7 +461,7 @@ static struct fio_file *get_next_file(struct thread_data *td)
 	if (f && (f->flags & FIO_FILE_OPEN) && td->file_service_left--)
 		return f;
 
-	if (td->file_service_type == FIO_FSERVICE_RR)
+	if (td->o.file_service_type == FIO_FSERVICE_RR)
 		f = get_next_file_rr(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
 	else
 		f = get_next_file_rand(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
@@ -402,7 +475,7 @@ static struct fio_file *find_next_new_file(struct thread_data *td)
 {
 	struct fio_file *f;
 
-	if (td->file_service_type == FIO_FSERVICE_RR)
+	if (td->o.file_service_type == FIO_FSERVICE_RR)
 		f = get_next_file_rr(td, 0, FIO_FILE_OPEN);
 	else
 		f = get_next_file_rand(td, 0, FIO_FILE_OPEN);
@@ -481,8 +554,8 @@ set_file:
 		 * probably not the right place to do this, but see
 		 * if we need to open a new file
 		 */
-		if (td->nr_open_files < td->open_files &&
-		    td->open_files != td->nr_files) {
+		if (td->nr_open_files < td->o.open_files &&
+		    td->o.open_files != td->o.nr_files) {
 			f = find_next_new_file(td);
 
 			if (!f || (ret = td_io_open_file(td, f))) {
@@ -493,9 +566,9 @@ set_file:
 		}
 	} while (1);
 
-	if (td->zone_bytes >= td->zone_size) {
+	if (td->zone_bytes >= td->o.zone_size) {
 		td->zone_bytes = 0;
-		f->last_pos += td->zone_skip;
+		f->last_pos += td->o.zone_skip;
 	}
 
 	if (io_u->buflen + io_u->offset > f->real_file_size) {
@@ -515,7 +588,7 @@ set_file:
 
 		f->last_pos = io_u->offset + io_u->buflen;
 
-		if (td->verify != VERIFY_NONE)
+		if (td->o.verify != VERIFY_NONE)
 			populate_verify_io_u(td, io_u);
 	}
 
@@ -593,7 +666,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 		icd->bytes_done[idx] += bytes;
 
 		if (io_u->end_io) {
-			ret = io_u->end_io(io_u);
+			ret = io_u->end_io(td, io_u);
 			if (ret && !icd->error)
 				icd->error = ret;
 		}
@@ -653,12 +726,10 @@ long io_u_queued_complete(struct thread_data *td, int min_events)
 	struct io_completion_data icd;
 	struct timespec *tvp = NULL;
 	int ret;
+	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
 
-	if (!min_events) {
-		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
-
+	if (!min_events)
 		tvp = &ts;
-	}
 
 	ret = td_io_getevents(td, min_events, td->cur_depth, tvp);
 	if (ret < 0) {
@@ -747,7 +818,7 @@ static void io_u_timeout_handler(int fio_unused sig)
 		return;
 	}
 
-	log_err("fio: io_u timeout: job=%s, pid=%d\n", td->name, td->pid);
+	log_err("fio: io_u timeout: job=%s, pid=%d\n", td->o.name, td->pid);
 
 	list_for_each(entry, &td->io_u_busylist) {
 		io_u = list_entry(entry, struct io_u, list);
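The file service hunks above boil down to two selection policies: round-robin walks the file list with a wrapping index, while the random policy scales a random long onto [0, nr_files), the same trick used for offsets. A self-contained sketch of both, using illustrative pick_rr()/pick_rand() helpers and dropping fio's FIO_FILE_OPEN/FIO_FILE_CLOSING flag filtering:

#include <stdio.h>
#include <stdlib.h>

#define NR_FILES 4

static unsigned int next_file;

/* round-robin: each file gets one turn per pass over the list */
static unsigned int pick_rr(void)
{
	unsigned int fno = next_file;

	next_file++;
	if (next_file >= NR_FILES)
		next_file = 0;

	return fno;
}

/* random: scale r from [0, RAND_MAX] onto [0, NR_FILES) */
static unsigned int pick_rand(void)
{
	long r = random();

	return (unsigned int) ((double) NR_FILES * (r / (RAND_MAX + 1.0)));
}

int main(void)
{
	srandom(1);
	for (int i = 0; i < 6; i++)
		printf("rr=%u rand=%u\n", pick_rr(), pick_rand());
	return 0;
}

Round-robin guarantees equal service per pass; the random policy's scaling keeps the choice approximately uniform across nr_files without ever computing a biased modulo.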