X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=4d3116b7fd4fc0901f4419e8690270c2c18a2d43;hp=d92d940522191451a830e2b1c44428d385e4ccbd;hb=1fbbf72e16c27a6fda636db3891a41cd37dc6666;hpb=720e84ad8292ab7b3a8e264fb00db71d796600d1

diff --git a/io_u.c b/io_u.c
index d92d9405..4d3116b7 100644
--- a/io_u.c
+++ b/io_u.c
@@ -7,6 +7,8 @@
 #include "fio.h"
 #include "hash.h"
+#include "verify.h"
+#include "lib/rand.h"
 
 struct io_completion_data {
 	int nr;				/* input */
@@ -113,7 +115,8 @@ static int get_next_free_block(struct thread_data *td, struct fio_file *f,
 
 	i = f->last_free_lookup;
 	*b = (i * BLOCKS_PER_MAP);
-	while ((*b) * min_bs < f->real_file_size) {
+	while ((*b) * min_bs < f->real_file_size &&
+	       (*b) * min_bs < f->io_size) {
 		if (f->file_map[i] != (unsigned int) -1) {
 			*b += ffz(f->file_map[i]);
 			if (*b > last_block(td, f, ddir))
@@ -185,7 +188,7 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
  * until we find a free one. For sequential io, just return the end of
  * the last io issued.
  */
-static int get_next_offset(struct thread_data *td, struct io_u *io_u)
+static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
 {
 	struct fio_file *f = io_u->file;
 	unsigned long long b;
@@ -229,12 +232,17 @@ static int get_next_offset(struct thread_data *td, struct io_u *io_u)
 	return 0;
 }
 
-static inline int is_power_of_2(unsigned int val)
+static int get_next_offset(struct thread_data *td, struct io_u *io_u)
 {
-	return (val != 0 && ((val & (val - 1)) == 0));
+	struct prof_io_ops *ops = &td->prof_io_ops;
+
+	if (ops->fill_io_u_off)
+		return ops->fill_io_u_off(td, io_u);
+
+	return __get_next_offset(td, io_u);
 }
 
-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
+static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 {
 	const int ddir = io_u->ddir;
 	unsigned int uninitialized_var(buflen);
@@ -279,6 +287,16 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
 	return buflen;
 }
 
+static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
+{
+	struct prof_io_ops *ops = &td->prof_io_ops;
+
+	if (ops->fill_io_u_size)
+		return ops->fill_io_u_size(td, io_u);
+
+	return __get_next_buflen(td, io_u);
+}
+
 static void set_rwmix_bytes(struct thread_data *td)
 {
 	unsigned int diff;
@@ -305,6 +323,53 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 	return DDIR_WRITE;
 }
 
+static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
+{
+	enum fio_ddir odir = ddir ^ 1;
+	struct timeval t;
+	long usec;
+
+	if (td->rate_pending_usleep[ddir] <= 0)
+		return ddir;
+
+	/*
+	 * We have too much pending sleep in this direction. See if we
+	 * should switch.
+	 */
+	if (td_rw(td)) {
+		/*
+		 * Other direction does not have too much pending, switch
+		 */
+		if (td->rate_pending_usleep[odir] < 100000)
+			return odir;
+
+		/*
+		 * Both directions have pending sleep. Sleep the minimum time
+		 * and deduct from both.
+		 */
+		if (td->rate_pending_usleep[ddir] <=
+			td->rate_pending_usleep[odir]) {
+			usec = td->rate_pending_usleep[ddir];
+		} else {
+			usec = td->rate_pending_usleep[odir];
+			ddir = odir;
+		}
+	} else
+		usec = td->rate_pending_usleep[ddir];
+
+	fio_gettime(&t, NULL);
+	usec_sleep(td, usec);
+	usec = utime_since_now(&t);
+
+	td->rate_pending_usleep[ddir] -= usec;
+
+	odir = ddir ^ 1;
+	if (td_rw(td) && __should_check_rate(td, odir))
+		td->rate_pending_usleep[odir] -= usec;
+
+	return ddir;
+}
+
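[Annotation: the rate_ddir() hunk above is easier to follow outside diff form. Below is a minimal standalone sketch of the same pending-sleep balancing idea; pick_dir(), pending[] and THRESHOLD_USEC are illustrative stand-ins for rate_ddir(), td->rate_pending_usleep[] and the 100000 usec cutoff, and the actual sleep/measurement step is omitted.]

    /*
     * Sketch of rate_ddir()'s balancing: if the preferred direction owes no
     * sleep, use it; if only it is backed up, switch; if both are backed up,
     * pay the smaller debt and charge both directions. (fio additionally
     * checks per-direction rate enablement before charging the other side.)
     */
    #include <stdio.h>

    #define THRESHOLD_USEC 100000L

    static int pick_dir(long pending[2], int dir, long *sleep_usec)
    {
    	int odir = dir ^ 1;

    	*sleep_usec = 0;
    	if (pending[dir] <= 0)
    		return dir;
    	if (pending[odir] < THRESHOLD_USEC)
    		return odir;		/* other side isn't backed up: switch */
    	if (pending[odir] < pending[dir])
    		dir = odir;		/* sleep the minimum debt */
    	*sleep_usec = pending[dir];
    	pending[0] -= *sleep_usec;	/* deduct from both directions */
    	pending[1] -= *sleep_usec;
    	return dir;
    }

    int main(void)
    {
    	long pending[2] = { 250000, 120000 };	/* usec of debt per direction */
    	long slept;
    	int dir = pick_dir(pending, 0, &slept);

    	printf("issue dir %d after sleeping %ld usec\n", dir, slept);
    	return 0;
    }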
 /*
  * Return the data direction for the next io_u. If the job is a
  * mixed read/write workload, check the rwmix cycle and switch if
@@ -312,44 +377,60 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
  */
 static enum fio_ddir get_rw_ddir(struct thread_data *td)
 {
+	enum fio_ddir ddir;
+
+	/*
+	 * see if it's time to fsync
+	 */
+	if (td->o.fsync_blocks &&
+	    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
+	    td->io_issues[DDIR_WRITE] && should_fsync(td))
+		return DDIR_SYNC;
+
+	/*
+	 * see if it's time to fdatasync
+	 */
+	if (td->o.fdatasync_blocks &&
+	    !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
+	    td->io_issues[DDIR_WRITE] && should_fsync(td))
+		return DDIR_DATASYNC;
+
+	/*
+	 * see if it's time to sync_file_range
+	 */
+	if (td->sync_file_range_nr &&
+	    !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
+	    td->io_issues[DDIR_WRITE] && should_fsync(td))
+		return DDIR_SYNC_FILE_RANGE;
+
 	if (td_rw(td)) {
 		/*
 		 * Check if it's time to seed a new data direction.
 		 */
 		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
-			unsigned long long max_bytes;
-			enum fio_ddir ddir;
-
 			/*
 			 * Put a top limit on how many bytes we do for
 			 * one data direction, to avoid overflowing the
 			 * ranges too much
 			 */
 			ddir = get_rand_ddir(td);
-			max_bytes = td->this_io_bytes[ddir];
-			if (max_bytes >=
-			    (td->o.size * td->o.rwmix[ddir] / 100)) {
-				if (!td->rw_end_set[ddir]) {
-					td->rw_end_set[ddir] = 1;
-					fio_gettime(&td->rw_end[ddir], NULL);
-				}
-
-				ddir ^= 1;
-			}
 
 			if (ddir != td->rwmix_ddir)
 				set_rwmix_bytes(td);
 
 			td->rwmix_ddir = ddir;
 		}
-		return td->rwmix_ddir;
+		ddir = td->rwmix_ddir;
 	} else if (td_read(td))
-		return DDIR_READ;
+		ddir = DDIR_READ;
 	else
-		return DDIR_WRITE;
+		ddir = DDIR_WRITE;
+
+	td->rwmix_ddir = rate_ddir(td, ddir);
+	return td->rwmix_ddir;
 }
 
-static void put_file_log(struct thread_data *td, struct fio_file *f)
+void put_file_log(struct thread_data *td, struct fio_file *f)
 {
 	int ret = put_file(td, f);
 
@@ -359,16 +440,27 @@ static void put_file_log(struct thread_data *td, struct fio_file *f)
 
 void put_io_u(struct thread_data *td, struct io_u *io_u)
 {
-	assert((io_u->flags & IO_U_F_FREE) == 0);
+	td_io_u_lock(td);
+
 	io_u->flags |= IO_U_F_FREE;
+	io_u->flags &= ~IO_U_F_FREE_DEF;
 
 	if (io_u->file)
 		put_file_log(td, io_u->file);
 
 	io_u->file = NULL;
-	flist_del(&io_u->list);
+	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
+		td->cur_depth--;
+	flist_del_init(&io_u->list);
 	flist_add(&io_u->list, &td->io_u_freelist);
-	td->cur_depth--;
+	td_io_u_unlock(td);
+	td_io_u_free_notify(td);
+}
+
+void clear_io_u(struct thread_data *td, struct io_u *io_u)
+{
+	io_u->flags &= ~IO_U_F_FLIGHT;
+	put_io_u(td, io_u);
 }
 
 void requeue_io_u(struct thread_data *td, struct io_u **io_u)
@@ -377,15 +469,18 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u)
 
 	dprint(FD_IO, "requeue %p\n", __io_u);
 
+	td_io_u_lock(td);
+
 	__io_u->flags |= IO_U_F_FREE;
-	if ((__io_u->flags & IO_U_F_FLIGHT) && (__io_u->ddir != DDIR_SYNC))
+	if ((__io_u->flags & IO_U_F_FLIGHT) && !ddir_sync(__io_u->ddir))
 		td->io_issues[__io_u->ddir]--;
 
 	__io_u->flags &= ~IO_U_F_FLIGHT;
-
+	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
+		td->cur_depth--;
 	flist_del(&__io_u->list);
 	flist_add_tail(&__io_u->list, &td->io_u_requeues);
-	td->cur_depth--;
+	td_io_u_unlock(td);
 	*io_u = NULL;
 }
 
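[Annotation: the three sync triggers get_rw_ddir() now owns all use the same modulo test — a sync direction is returned whenever the write issue count is a non-zero multiple of the configured interval. A tiny standalone sketch of that trigger, with illustrative names:]

    /*
     * Sketch of the periodic-sync trigger in get_rw_ddir() above. With
     * interval 4, syncs fire after writes 4, 8, 12, ... and never at 0.
     */
    #include <stdio.h>

    static int time_to_sync(unsigned long writes_issued, unsigned int interval)
    {
    	return interval && writes_issued && !(writes_issued % interval);
    }

    int main(void)
    {
    	unsigned long i;

    	for (i = 1; i <= 12; i++)
    		if (time_to_sync(i, 4))
    			printf("sync after write %lu\n", i);
    	return 0;
    }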
@@ -394,17 +489,13 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	if (td->io_ops->flags & FIO_NOIO)
 		goto out;
 
+	io_u->ddir = get_rw_ddir(td);
+
 	/*
-	 * see if it's time to sync
+	 * fsync() or fdatasync(), we are done
 	 */
-	if (td->o.fsync_blocks &&
-	    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
-	    td->io_issues[DDIR_WRITE] && should_fsync(td)) {
-		io_u->ddir = DDIR_SYNC;
+	if (ddir_sync(io_u->ddir))
 		goto out;
-	}
-
-	io_u->ddir = get_rw_ddir(td);
 
 	/*
 	 * See if it's time to switch to a new zone
@@ -620,8 +711,9 @@ static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
 /*
  * Get next file to service by choosing one at random
  */
-static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf,
-					   int badf)
+static struct fio_file *get_next_file_rand(struct thread_data *td,
+					   enum fio_file_flags goodf,
+					   enum fio_file_flags badf)
 {
 	struct fio_file *f;
 	int fno;
@@ -633,10 +725,10 @@ static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf,
 		fno = (unsigned int) ((double) td->o.nr_files
 			* (r / (OS_RAND_MAX + 1.0)));
 		f = td->files[fno];
-		if (f->flags & FIO_FILE_DONE)
+		if (fio_file_done(f))
 			continue;
 
-		if (!(f->flags & FIO_FILE_OPEN)) {
+		if (!fio_file_open(f)) {
 			int err;
 
 			err = td_io_open_file(td, f);
@@ -673,24 +765,26 @@ static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
 			td->next_file = 0;
 
 		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
-		if (f->flags & FIO_FILE_DONE) {
+		if (fio_file_done(f)) {
 			f = NULL;
 			continue;
 		}
 
-		if (!(f->flags & FIO_FILE_OPEN)) {
+		if (!fio_file_open(f)) {
 			int err;
 
 			err = td_io_open_file(td, f);
 			if (err) {
 				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
+				f = NULL;
 				continue;
 			}
 			opened = 1;
 		}
 
-		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf, f->flags);
+		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
+							f->flags);
 		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
 			break;
 
@@ -704,7 +798,7 @@ static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
 	return f;
 }
 
-static struct fio_file *get_next_file(struct thread_data *td)
+static struct fio_file *__get_next_file(struct thread_data *td)
 {
 	struct fio_file *f;
 
@@ -719,7 +813,7 @@ static struct fio_file *get_next_file(struct thread_data *td)
 	}
 
 	f = td->file_service_file;
-	if (f && (f->flags & FIO_FILE_OPEN) && !(f->flags & FIO_FILE_CLOSING)) {
+	if (f && fio_file_open(f) && !fio_file_closing(f)) {
 		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
 			goto out;
 		if (td->file_service_left--)
@@ -728,9 +822,9 @@ static struct fio_file *get_next_file(struct thread_data *td)
 
 	if (td->o.file_service_type == FIO_FSERVICE_RR ||
 	    td->o.file_service_type == FIO_FSERVICE_SEQ)
-		f = get_next_file_rr(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
+		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
 	else
-		f = get_next_file_rand(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
+		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);
 
 	td->file_service_file = f;
 	td->file_service_left = td->file_service_nr - 1;
@@ -739,6 +833,16 @@ out:
 	return f;
 }
 
+static struct fio_file *get_next_file(struct thread_data *td)
+{
+	struct prof_io_ops *ops = &td->prof_io_ops;
+
+	if (ops->get_next_file)
+		return ops->get_next_file(td);
+
+	return __get_next_file(td);
+}
+
 static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
 {
 	struct fio_file *f;
@@ -757,9 +861,10 @@ static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
 		put_file_log(td, f);
 		td_io_close_file(td, f);
 		io_u->file = NULL;
-		f->flags |= FIO_FILE_DONE;
+		fio_file_set_done(f);
 		td->nr_done_files++;
-		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, td->nr_done_files, td->o.nr_files);
+		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
+					td->nr_done_files, td->o.nr_files);
 	} while (1);
 
 	return 0;
 }
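[Annotation: get_next_offset(), get_next_buflen() and get_next_file() now all follow the same pattern — consult an optional prof_io_ops hook, fall back to the __-prefixed built-in. A standalone sketch of that dispatch pattern; ctx, prof_ops and fixed_offset() are hypothetical names, not fio's:]

    /*
     * Profile-hook dispatch: a NULL hook means "use the default policy",
     * a non-NULL hook overrides it entirely.
     */
    #include <stddef.h>
    #include <stdio.h>

    struct ctx;

    struct prof_ops {
    	int (*next_offset)(struct ctx *);	/* NULL: use default */
    };

    struct ctx {
    	struct prof_ops ops;
    	int cursor;
    };

    static int __next_offset(struct ctx *c)
    {
    	return c->cursor++;			/* built-in sequential policy */
    }

    static int next_offset(struct ctx *c)
    {
    	if (c->ops.next_offset)
    		return c->ops.next_offset(c);	/* profile override wins */
    	return __next_offset(c);
    }

    static int fixed_offset(struct ctx *c)
    {
    	(void)c;
    	return 42;				/* trivial override */
    }

    int main(void)
    {
    	struct ctx plain = { { NULL }, 0 };
    	struct ctx prof = { { fixed_offset }, 0 };

    	printf("%d %d\n", next_offset(&plain), next_offset(&prof));
    	return 0;
    }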
@@ -770,6 +875,9 @@ struct io_u *__get_io_u(struct thread_data *td)
 {
 	struct io_u *io_u = NULL;
 
+	td_io_u_lock(td);
+
+again:
 	if (!flist_empty(&td->io_u_requeues))
 		io_u = flist_entry(td->io_u_requeues.next, struct io_u, list);
 	else if (!queue_full(td)) {
@@ -783,14 +891,23 @@ struct io_u *__get_io_u(struct thread_data *td)
 
 	if (io_u) {
 		assert(io_u->flags & IO_U_F_FREE);
-		io_u->flags &= ~IO_U_F_FREE;
+		io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
 
 		io_u->error = 0;
 		flist_del(&io_u->list);
 		flist_add(&io_u->list, &td->io_u_busylist);
 		td->cur_depth++;
+		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
+	} else if (td->o.verify_async) {
+		/*
+		 * We ran out, wait for async verify threads to finish and
+		 * return one
+		 */
+		pthread_cond_wait(&td->free_cond, &td->io_u_lock);
+		goto again;
 	}
 
+	td_io_u_unlock(td);
 	return io_u;
 }
 
@@ -827,9 +944,9 @@ struct io_u *get_io_u(struct thread_data *td)
 	}
 
 	f = io_u->file;
-	assert(f->flags & FIO_FILE_OPEN);
+	assert(fio_file_open(f));
 
-	if (io_u->ddir != DDIR_SYNC) {
+	if (!ddir_sync(io_u->ddir)) {
 		if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
 			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
 			goto err_put;
@@ -837,7 +954,7 @@ struct io_u *get_io_u(struct thread_data *td)
 
 		f->last_pos = io_u->offset + io_u->buflen;
 
-		if (td->o.verify != VERIFY_NONE)
+		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_WRITE)
 			populate_verify_io_u(td, io_u);
 		else if (td->o.refill_buffers && io_u->ddir == DDIR_WRITE)
 			io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
@@ -846,7 +963,6 @@ struct io_u *get_io_u(struct thread_data *td)
 	/*
 	 * Set io data pointers.
 	 */
-	io_u->endpos = io_u->offset + io_u->buflen;
 	io_u->xfer_buf = io_u->buf;
 	io_u->xfer_buflen = io_u->buflen;
 
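[Annotation: the verify_async branch in __get_io_u() above is the classic condition-variable wait-and-retry pattern — sleep on the condvar (which atomically drops the lock), then re-check the free list once woken; put_io_u()'s td_io_u_free_notify() is the matching wakeup. A minimal standalone sketch with hypothetical names (get_unit/put_unit/nr_free), single-threaded in main just to exercise the API:]

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t free_cond = PTHREAD_COND_INITIALIZER;
    static int nr_free;

    int get_unit(void)
    {
    	pthread_mutex_lock(&lock);
    	while (!nr_free)			/* the "goto again" retry loop */
    		pthread_cond_wait(&free_cond, &lock);
    	nr_free--;
    	pthread_mutex_unlock(&lock);
    	return 0;
    }

    void put_unit(void)
    {
    	pthread_mutex_lock(&lock);
    	nr_free++;				/* back on the free list */
    	pthread_cond_signal(&free_cond);	/* cf. td_io_u_free_notify() */
    	pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
    	/* with another thread calling put_unit(), get_unit() would block */
    	put_unit();
    	return get_unit();
    }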
@@ -888,14 +1004,22 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 	 * initialized, silence that warning.
 	 */
 	unsigned long uninitialized_var(usec);
+	struct fio_file *f;
 
 	dprint_io_u(io_u, "io complete");
 
+	td_io_u_lock(td);
 	assert(io_u->flags & IO_U_F_FLIGHT);
 	io_u->flags &= ~IO_U_F_FLIGHT;
+	td_io_u_unlock(td);
 
-	if (io_u->ddir == DDIR_SYNC) {
+	if (ddir_sync(io_u->ddir)) {
 		td->last_was_sync = 1;
+		f = io_u->file;
+		if (f) {
+			f->first_write = -1ULL;
+			f->last_write = -1ULL;
+		}
 		return;
 	}
 
@@ -904,23 +1028,49 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 	if (!io_u->error) {
 		unsigned int bytes = io_u->buflen - io_u->resid;
 		const enum fio_ddir idx = io_u->ddir;
+		const enum fio_ddir odx = io_u->ddir ^ 1;
 		int ret;
 
 		td->io_blocks[idx]++;
 		td->io_bytes[idx] += bytes;
 		td->this_io_bytes[idx] += bytes;
 
+		if (idx == DDIR_WRITE) {
+			f = io_u->file;
+			if (f) {
+				if (f->first_write == -1ULL ||
+				    io_u->offset < f->first_write)
+					f->first_write = io_u->offset;
+				if (f->last_write == -1ULL ||
+				    ((io_u->offset + bytes) > f->last_write))
+					f->last_write = io_u->offset + bytes;
+			}
+		}
+
 		if (ramp_time_over(td)) {
+			unsigned long uninitialized_var(lusec);
+
 			if (!td->o.disable_clat || !td->o.disable_bw)
-				usec = utime_since(&io_u->issue_time,
+				lusec = utime_since(&io_u->issue_time,
 							&icd->time);
 
 			if (!td->o.disable_clat) {
-				add_clat_sample(td, idx, usec);
-				io_u_mark_latency(td, usec);
+				add_clat_sample(td, idx, lusec, bytes);
+				io_u_mark_latency(td, lusec);
 			}
 			if (!td->o.disable_bw)
-				add_bw_sample(td, idx, &icd->time);
+				add_bw_sample(td, idx, bytes, &icd->time);
+			if (__should_check_rate(td, idx)) {
+				td->rate_pending_usleep[idx] =
+					((td->this_io_bytes[idx] *
+					  td->rate_nsec_cycle[idx]) / 1000 -
+					 utime_since_now(&td->start));
+			}
+			if (__should_check_rate(td, idx ^ 1))
+				td->rate_pending_usleep[odx] =
+					((td->this_io_bytes[odx] *
+					  td->rate_nsec_cycle[odx]) / 1000 -
+					 utime_since_now(&td->start));
 		}
 
 		if (td_write(td) && idx == DDIR_WRITE &&
@@ -939,6 +1089,17 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 		icd->error = io_u->error;
 		io_u_log_error(td, io_u);
 	}
+	if (td->o.continue_on_error && icd->error &&
+	    td_non_fatal_error(icd->error)) {
+		/*
+		 * If there is a non_fatal error, then add to the error count
+		 * and clear all the errors.
+		 */
+		update_error_count(td, icd->error);
+		td_clear_error(td);
+		icd->error = 0;
+		io_u->error = 0;
+	}
 }
 
 static void init_icd(struct thread_data *td, struct io_completion_data *icd,
@@ -963,32 +1124,44 @@ static void ios_completed(struct thread_data *td,
 
 		io_u = td->io_ops->event(td, i);
 
 		io_completed(td, io_u, icd);
-		put_io_u(td, io_u);
+
+		if (!(io_u->flags & IO_U_F_FREE_DEF))
+			put_io_u(td, io_u);
 	}
 }
 
 /*
  * Complete a single io_u for the sync engines.
  */
-long io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
+int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
+		       unsigned long *bytes)
 {
 	struct io_completion_data icd;
 
 	init_icd(td, &icd, 1);
 	io_completed(td, io_u, &icd);
-	put_io_u(td, io_u);
 
-	if (!icd.error)
-		return icd.bytes_done[0] + icd.bytes_done[1];
+	if (!(io_u->flags & IO_U_F_FREE_DEF))
+		put_io_u(td, io_u);
+
+	if (icd.error) {
+		td_verror(td, icd.error, "io_u_sync_complete");
+		return -1;
+	}
+
+	if (bytes) {
+		bytes[0] += icd.bytes_done[0];
+		bytes[1] += icd.bytes_done[1];
+	}
 
-	td_verror(td, icd.error, "io_u_sync_complete");
-	return -1;
+	return 0;
 }
 
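[Annotation: the completion path above recomputes the rate debt consumed by rate_ddir() as this_io_bytes * rate_nsec_cycle / 1000 minus the usec elapsed since the job started. A worked example of that arithmetic, assuming (from the units in the expression) that rate_nsec_cycle is nanoseconds per byte at the target rate; the numbers are made up:]

    /*
     * pending_usec = bytes_done * nsec_per_byte / 1000 - usec_elapsed.
     * Positive: ahead of the target rate, sleep. Negative: behind, no sleep.
     */
    #include <stdio.h>

    int main(void)
    {
    	unsigned long long bytes_done = 64ULL * 1024 * 1024; /* 64 MiB so far */
    	unsigned long long nsec_per_byte = 100;		/* ~10 MB/s target */
    	unsigned long long usec_elapsed = 5000000;	/* 5 s since start */
    	long long pending;

    	pending = (long long)(bytes_done * nsec_per_byte / 1000)
    			- (long long)usec_elapsed;
    	/* 6710886 - 5000000 = 1710886 usec: ~1.7 s ahead of pace */
    	printf("pending sleep: %lld usec\n", pending);
    	return 0;
    }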
 /*
  * Called to complete min_events number of io for the async engines.
  */
-long io_u_queued_complete(struct thread_data *td, int min_evts)
+int io_u_queued_complete(struct thread_data *td, int min_evts,
+			 unsigned long *bytes)
 {
 	struct io_completion_data icd;
 	struct timespec *tvp = NULL;
@@ -1009,11 +1182,17 @@ long io_u_queued_complete(struct thread_data *td, int min_evts)
 
 	init_icd(td, &icd, ret);
 	ios_completed(td, &icd);
-	if (!icd.error)
-		return icd.bytes_done[0] + icd.bytes_done[1];
+	if (icd.error) {
+		td_verror(td, icd.error, "io_u_queued_complete");
+		return -1;
+	}
 
-	td_verror(td, icd.error, "io_u_queued_complete");
-	return -1;
+	if (bytes) {
+		bytes[0] += icd.bytes_done[0];
+		bytes[1] += icd.bytes_done[1];
+	}
+
+	return 0;
 }
 
 /*
@@ -1025,7 +1204,7 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
 		unsigned long slat_time;
 
 		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
-		add_slat_sample(td, io_u->ddir, slat_time);
+		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
 	}
 }
 
@@ -1039,7 +1218,7 @@ void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
 
 	if (!td->o.zero_buffers) {
 		while ((void *) ptr - io_u->buf < max_bs) {
-			*ptr = rand() * GOLDEN_RATIO_PRIME;
+			*ptr = __rand(&__fio_rand_state);
 			ptr++;
 		}
 	} else
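[Annotation: io_u_sync_complete() and io_u_queued_complete() switch above from returning a byte total to returning 0/-1 and accumulating per-direction byte counts into an optional caller-supplied array. A standalone sketch of that calling convention; complete_one() and its payloads are made up, not fio code:]

    /*
     * Status via return value; byte counts accumulated (not overwritten)
     * through an out-parameter, where NULL means "caller doesn't care".
     */
    #include <stdio.h>

    static int complete_one(int dir, unsigned long nbytes, unsigned long *bytes)
    {
    	if (nbytes == 0)
    		return -1;		/* error path: caller sees -1 */
    	if (bytes)
    		bytes[dir] += nbytes;	/* accumulate across calls */
    	return 0;
    }

    int main(void)
    {
    	unsigned long bytes[2] = { 0, 0 };	/* [read], [write] */

    	complete_one(0, 4096, bytes);
    	complete_one(1, 8192, bytes);
    	complete_one(1, 4096, NULL);		/* caller not tracking */
    	printf("read %lu, write %lu\n", bytes[0], bytes[1]);
    	return 0;
    }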