X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=d81fefdeefe2269e2be9df49d976cc7c0c801c22;hp=428b312c9b34fea3cd1db6d38b375fd1a14376e6;hb=24d23ca76f9be4500270e7074a1dab038e3a4a2b;hpb=422f9e4b57549ce1e163b9c1de71932d9ea24de4

diff --git a/io_u.c b/io_u.c
index 428b312c..d81fefde 100644
--- a/io_u.c
+++ b/io_u.c
@@ -13,10 +13,9 @@
 
 struct io_completion_data {
 	int nr;				/* input */
-	int account;			/* input */
 
 	int error;			/* output */
-	unsigned long bytes_done[2];	/* output */
+	unsigned long bytes_done[DDIR_RWDIR_CNT];	/* output */
 	struct timeval time;		/* output */
 };
 
@@ -158,8 +157,8 @@ static int get_next_free_block(struct thread_data *td, struct fio_file *f,
 	return 1;
 }
 
-static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
-				enum fio_ddir ddir, unsigned long long *b)
+static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
+				  enum fio_ddir ddir, unsigned long long *b)
 {
 	unsigned long long rmax, r, lastb;
 	int loops = 5;
@@ -235,23 +234,61 @@ ret:
 	return 0;
 }
 
+static int __get_next_rand_offset_zipf(struct thread_data *td,
+				       struct fio_file *f, enum fio_ddir ddir,
+				       unsigned long long *b)
+{
+	*b = zipf_next(&f->zipf);
+	return 0;
+}
+
+static int __get_next_rand_offset_pareto(struct thread_data *td,
+					 struct fio_file *f, enum fio_ddir ddir,
+					 unsigned long long *b)
+{
+	*b = pareto_next(&f->zipf);
+	return 0;
+}
+
+static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
+				enum fio_ddir ddir, unsigned long long *b)
+{
+	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
+		return __get_next_rand_offset(td, f, ddir, b);
+	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
+		return __get_next_rand_offset_zipf(td, f, ddir, b);
+	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
+		return __get_next_rand_offset_pareto(td, f, ddir, b);
+
+	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
+	return 1;
+}
+
 static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
 			       enum fio_ddir ddir, unsigned long long *b)
 {
-	if (get_next_rand_offset(td, f, ddir, b)) {
-		dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
-				f->file_name, f->last_pos, f->real_file_size);
-		return 1;
+	if (!get_next_rand_offset(td, f, ddir, b))
+		return 0;
+
+	if (td->o.time_based) {
+		fio_file_reset(f);
+		if (!get_next_rand_offset(td, f, ddir, b))
+			return 0;
 	}
 
-	return 0;
+	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
+			f->file_name, f->last_pos, f->real_file_size);
+	return 1;
 }
 
-static int get_next_seq_block(struct thread_data *td, struct fio_file *f,
-			      enum fio_ddir ddir, unsigned long long *b)
+static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
+			       enum fio_ddir ddir, unsigned long long *offset)
 {
 	assert(ddir_rw(ddir));
 
+	if (f->last_pos >= f->io_size + get_start_offset(td) && td->o.time_based)
+		f->last_pos = f->last_pos - f->io_size;
+
 	if (f->last_pos < f->real_file_size) {
 		unsigned long long pos;
 
@@ -262,7 +299,7 @@ static int get_next_seq_block(struct thread_data *td, struct fio_file *f,
 		if (pos)
 			pos += td->o.ddir_seq_add;
 
-		*b = pos / td->o.min_bs[ddir];
+		*offset = pos;
 		return 0;
 	}
 
@@ -270,31 +307,33 @@
 }
 
 static int get_next_block(struct thread_data *td, struct io_u *io_u,
-			  enum fio_ddir ddir, int rw_seq, unsigned long long *b)
+			  enum fio_ddir ddir, int rw_seq)
 {
 	struct fio_file *f = io_u->file;
+	unsigned long long b, offset;
 	int ret;
 
 	assert(ddir_rw(ddir));
 
+	b = offset = -1ULL;
+
 	if (rw_seq) {
 		if (td_random(td))
-			ret = get_next_rand_block(td, f, ddir, b);
+			ret = get_next_rand_block(td, f, ddir, &b);
 		else
-			ret = get_next_seq_block(td, f, ddir, b);
+			ret = get_next_seq_offset(td, f, ddir, &offset);
 	} else {
 		io_u->flags |= IO_U_F_BUSY_OK;
 
 		if (td->o.rw_seq == RW_SEQ_SEQ) {
-			ret = get_next_seq_block(td, f, ddir, b);
+			ret = get_next_seq_offset(td, f, ddir, &offset);
 			if (ret)
-				ret = get_next_rand_block(td, f, ddir, b);
+				ret = get_next_rand_block(td, f, ddir, &b);
 		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
 			if (f->last_start != -1ULL)
-				*b = (f->last_start - f->file_offset)
-					/ td->o.min_bs[ddir];
+				offset = f->last_start - f->file_offset;
 			else
-				*b = 0;
+				offset = 0;
 			ret = 0;
 		} else {
 			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
@@ -302,6 +341,17 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,
 		}
 	}
 
+	if (!ret) {
+		if (offset != -1ULL)
+			io_u->offset = offset;
+		else if (b != -1ULL)
+			io_u->offset = b * td->o.ba[ddir];
+		else {
+			log_err("fio: bug in offset generation\n");
+			ret = 1;
+		}
+	}
+
 	return ret;
 }
 
@@ -313,7 +363,6 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,
 static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
 {
 	struct fio_file *f = io_u->file;
-	unsigned long long b;
 	enum fio_ddir ddir = io_u->ddir;
 	int rw_seq_hit = 0;
 
@@ -324,10 +373,9 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
 		td->ddir_seq_nr = td->o.ddir_seq_nr;
 	}
 
-	if (get_next_block(td, io_u, ddir, rw_seq_hit, &b))
+	if (get_next_block(td, io_u, ddir, rw_seq_hit))
 		return 1;
 
-	io_u->offset = b * td->o.ba[ddir];
 	if (io_u->offset >= f->io_size) {
 		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
 					io_u->offset, f->io_size);
@@ -359,13 +407,13 @@ static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
 {
 	struct fio_file *f = io_u->file;
 
-	return io_u->offset + buflen <= f->io_size + td->o.start_offset;
+	return io_u->offset + buflen <= f->io_size + get_start_offset(td);
 }
 
 static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 {
 	const int ddir = io_u->ddir;
-	unsigned int uninitialized_var(buflen);
+	unsigned int buflen = 0;
 	unsigned int minbs, maxbs;
 	unsigned long r, rand_max;
 
@@ -377,6 +425,12 @@ static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
 	if (minbs == maxbs)
 		return minbs;
 
+	/*
+	 * If we can't satisfy the min block size from here, then fail
+	 */
+	if (!io_u_fits(td, io_u, minbs))
+		return 0;
+
 	if (td->o.use_os_rand)
 		rand_max = OS_RAND_MAX;
 	else
@@ -519,6 +573,8 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 	if (td_rw(td) && __should_check_rate(td, odir))
 		td->rate_pending_usleep[odir] -= usec;
 
+	if (ddir_trim(ddir))
+		return ddir;
 	return ddir;
 }
 
@@ -575,8 +631,10 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
 		ddir = td->rwmix_ddir;
 	} else if (td_read(td))
 		ddir = DDIR_READ;
-	else
+	else if (td_write(td))
 		ddir = DDIR_WRITE;
+	else
+		ddir = DDIR_TRIM;
 
 	td->rwmix_ddir = rate_ddir(td, ddir);
 	return td->rwmix_ddir;
@@ -662,7 +720,7 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	/*
 	 * See if it's time to switch to a new zone
 	 */
-	if (td->zone_bytes >= td->o.zone_size) {
+	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
 		td->zone_bytes = 0;
 		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
 		io_u->file->last_pos = io_u->file->file_offset;
@@ -1064,10 +1122,11 @@ again:
 		assert(io_u->flags & IO_U_F_FREE);
 		io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
 		io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);
+		io_u->flags &= ~IO_U_F_VER_LIST;
 
 		io_u->error = 0;
 		flist_del(&io_u->list);
-		flist_add(&io_u->list, &td->io_u_busylist);
+		flist_add_tail(&io_u->list, &td->io_u_busylist);
 		td->cur_depth++;
 		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
 	} else if (td->o.verify_async) {
@@ -1111,10 +1170,9 @@ static int check_get_verify(struct thread_data *td, struct io_u *io_u)
 	if (td->o.verify_backlog && td->io_hist_len) {
 		int get_verify = 0;
 
-		if (td->verify_batch) {
-			td->verify_batch--;
+		if (td->verify_batch)
 			get_verify = 1;
-		} else if (!(td->io_hist_len % td->o.verify_backlog) &&
+		else if (!(td->io_hist_len % td->o.verify_backlog) &&
 		    td->last_ddir != DDIR_READ) {
 			td->verify_batch = td->o.verify_batch;
 			if (!td->verify_batch)
@@ -1122,8 +1180,10 @@
 			get_verify = 1;
 		}
 
-		if (get_verify && !get_next_verify(td, io_u))
+		if (get_verify && !get_next_verify(td, io_u)) {
+			td->verify_batch--;
 			return 1;
+		}
 	}
 
 	return 0;
@@ -1147,6 +1207,7 @@ static void small_content_scramble(struct io_u *io_u)
 
 	p = io_u->xfer_buf;
 	boffset = io_u->offset;
+	io_u->buf_filled_len = 0;
 
 	for (i = 0; i < nr_blocks; i++) {
 		/*
@@ -1218,12 +1279,15 @@ struct io_u *get_io_u(struct thread_data *td)
 		f->last_pos = io_u->offset + io_u->buflen;
 
 		if (io_u->ddir == DDIR_WRITE) {
-			if (td->o.verify != VERIFY_NONE)
-				populate_verify_io_u(td, io_u);
-			else if (td->o.refill_buffers)
-				io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
-			else if (td->o.scramble_buffers)
+			if (td->o.refill_buffers) {
+				io_u_fill_buffer(td, io_u,
+					io_u->xfer_buflen, io_u->xfer_buflen);
+			} else if (td->o.scramble_buffers)
 				do_scramble = 1;
+			if (td->o.verify != VERIFY_NONE) {
+				populate_verify_io_u(td, io_u);
+				do_scramble = 0;
+			}
 		} else if (io_u->ddir == DDIR_READ) {
 			/*
 			 * Reset the buf_filled parameters so next time if the
@@ -1256,10 +1320,12 @@ err_put:
 
 void io_u_log_error(struct thread_data *td, struct io_u *io_u)
 {
+	enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
 	const char *msg[] = { "read", "write", "sync", "datasync",
 				"sync_file_range", "wait", "trim" };
 
-
+	if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
+		return;
 
 	log_err("fio: io_u error");
 
@@ -1279,10 +1345,7 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 				  struct io_completion_data *icd,
 				  const enum fio_ddir idx, unsigned int bytes)
 {
-	unsigned long uninitialized_var(lusec);
-
-	if (!icd->account)
-		return;
+	unsigned long lusec = 0;
 
 	if (!td->o.disable_clat || !td->o.disable_bw)
 		lusec = utime_since(&io_u->issue_time, &icd->time);
@@ -1292,6 +1355,13 @@
 
 		tusec = utime_since(&io_u->start_time, &icd->time);
 		add_lat_sample(td, idx, tusec, bytes);
+
+		if (td->o.max_latency && tusec > td->o.max_latency) {
+			if (!td->error)
+				log_err("fio: latency of %lu usec exceeds specified max (%u usec)\n", tusec, td->o.max_latency);
+			td_verror(td, ETIMEDOUT, "max latency exceeded");
+			icd->error = ETIMEDOUT;
+		}
 	}
 
 	if (!td->o.disable_clat) {
@@ -1318,11 +1388,6 @@ static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
 static void io_completed(struct thread_data *td, struct io_u *io_u,
 			 struct io_completion_data *icd)
 {
-	/*
-	 * Older gcc's are too dumb to realize that usec is always used
-	 * initialized, silence that warning.
-	 */
-	unsigned long uninitialized_var(usec);
 	struct fio_file *f;
 
 	dprint_io_u(io_u, "io complete");
@@ -1354,7 +1419,9 @@
 		td->io_blocks[idx]++;
 		td->this_io_blocks[idx]++;
 		td->io_bytes[idx] += bytes;
-		td->this_io_bytes[idx] += bytes;
+
+		if (!(io_u->flags & IO_U_F_VER_LIST))
+			td->this_io_bytes[idx] += bytes;
 
 		if (idx == DDIR_WRITE) {
 			f = io_u->file;
@@ -1368,7 +1435,8 @@
 			}
 		}
 
-		if (ramp_time_over(td) && td->runstate == TD_RUNNING) {
+		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
+					   td->runstate == TD_VERIFYING)) {
 			account_io_completion(td, io_u, icd, idx, bytes);
 
 			if (__should_check_rate(td, idx)) {
@@ -1376,7 +1444,7 @@
 				       (usec_for_io(td, idx) -
 					utime_since_now(&td->start));
 			}
-			if (__should_check_rate(td, odx))
+			if (idx != DDIR_TRIM && __should_check_rate(td, odx))
 				td->rate_pending_usleep[odx] =
 				       (usec_for_io(td, odx) -
 					utime_since_now(&td->start));
@@ -1398,8 +1466,10 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 		icd->error = io_u->error;
 		io_u_log_error(td, io_u);
 	}
-	if (icd->error && td_non_fatal_error(icd->error) &&
-	    (td->o.continue_on_error & td_error_type(io_u->ddir, icd->error))) {
+	if (icd->error) {
+		enum error_type_bit eb = td_error_type(io_u->ddir, icd->error);
+		if (!td_non_fatal_error(td, eb, icd->error))
+			return;
 		/*
 		 * If there is a non_fatal error, then add to the error count
 		 * and clear all the errors.
@@ -1414,14 +1484,15 @@
 
 static void init_icd(struct thread_data *td, struct io_completion_data *icd,
 		     int nr)
 {
+	int ddir;
 	if (!td->o.disable_clat || !td->o.disable_bw)
 		fio_gettime(&icd->time, NULL);
 
 	icd->nr = nr;
-	icd->account = 1;
 
 	icd->error = 0;
-	icd->bytes_done[0] = icd->bytes_done[1] = 0;
+	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+		icd->bytes_done[ddir] = 0;
 }
 
@@ -1437,8 +1508,6 @@ static void ios_completed(struct thread_data *td,
 
 		if (!(io_u->flags & IO_U_F_FREE_DEF))
 			put_io_u(td, io_u);
-
-		icd->account = 0;
 	}
 }
 
@@ -1462,8 +1531,10 @@ int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
 	}
 
 	if (bytes) {
-		bytes[0] += icd.bytes_done[0];
-		bytes[1] += icd.bytes_done[1];
+		int ddir;
+
+		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+			bytes[ddir] += icd.bytes_done[ddir];
 	}
 
 	return 0;
@@ -1500,8 +1571,10 @@ int io_u_queued_complete(struct thread_data *td, int min_evts,
 	}
 
 	if (bytes) {
-		bytes[0] += icd.bytes_done[0];
-		bytes[1] += icd.bytes_done[1];
+		int ddir;
+
+		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
+			bytes[ddir] += icd.bytes_done[ddir];
 	}
 
 	return 0;
@@ -1524,12 +1597,21 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
  * "randomly" fill the buffer contents
  */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
-		      unsigned int max_bs)
+		      unsigned int min_write, unsigned int max_bs)
 {
 	io_u->buf_filled_len = 0;
 
-	if (!td->o.zero_buffers)
-		fill_random_buf(&td->buf_state, io_u->buf, max_bs);
-	else
+	if (!td->o.zero_buffers) {
+		unsigned int perc = td->o.compress_percentage;
+
+		if (perc) {
+			unsigned int seg = min_write;
+
+			seg = min(min_write, td->o.compress_chunk);
+			fill_random_buf_percentage(&td->buf_state, io_u->buf,
+						   perc, seg, max_bs);
+		} else
+			fill_random_buf(&td->buf_state, io_u->buf, max_bs);
+	} else
 		memset(io_u->buf, 0, max_bs);
 }
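
The options that drive these new code paths live outside this diff, in fio's options table and HOWTO. Assuming the documented option names random_distribution, buffer_compress_percentage, buffer_compress_chunk, and max_latency, a job file along the following lines would exercise the Zipf offset engine, the compressible-buffer fill, and the new latency cutoff. All values are arbitrary examples; per account_io_completion() above, max_latency is compared against the completion latency in microseconds.

# illustrative job file -- option values are arbitrary examples
[zipf-job]
filename=/tmp/fio.test
size=256m
rw=randwrite
bs=4k
# routes offset generation through __get_next_rand_offset_zipf()
random_distribution=zipf:1.2
# exercises fill_random_buf_percentage() via io_u_fill_buffer()
buffer_compress_percentage=50
buffer_compress_chunk=4k
# trips the ETIMEDOUT path added to account_io_completion()
max_latency=250000
time_based
runtime=30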
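For readers unfamiliar with what zipf_next() hands back to __get_next_rand_offset_zipf(), the sketch below shows Zipf sampling by inverse CDF over a small block space. It is illustrative only -- it is not fio's lib/zipf.c implementation -- and the block count and theta are made-up example values (compile with -lm).

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

static double *cdf;		/* cumulative probability per block rank */
static unsigned long nblocks;

static void zipf_sketch_init(unsigned long nr, double theta)
{
	double sum = 0.0;
	unsigned long i;

	nblocks = nr;
	cdf = malloc(nr * sizeof(double));
	/* rank i+1 gets probability proportional to 1 / (i+1)^theta */
	for (i = 0; i < nr; i++)
		sum += 1.0 / pow((double)(i + 1), theta);
	cdf[0] = 1.0 / sum;
	for (i = 1; i < nr; i++)
		cdf[i] = cdf[i - 1] + 1.0 / (pow((double)(i + 1), theta) * sum);
}

/* draw one block index; low ranks are returned far more often */
static unsigned long zipf_sketch_next(void)
{
	double u = (double)rand() / RAND_MAX;
	unsigned long i;

	for (i = 0; i < nblocks - 1; i++)
		if (u <= cdf[i])
			break;
	return i;
}

int main(void)
{
	unsigned long hist[10] = { 0 };
	int i;

	zipf_sketch_init(10, 1.2);	/* 10 blocks, theta=1.2: example values */
	for (i = 0; i < 100000; i++)
		hist[zipf_sketch_next()]++;
	for (i = 0; i < 10; i++)
		printf("block %d: %lu hits\n", i, hist[i]);
	free(cdf);
	return 0;
}

Running it shows the heavy head of the distribution (block 0 takes by far the largest share of hits), which is exactly the access skew the new random_distribution engines model, in contrast to the uniform spread of __get_next_rand_offset().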