X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=io_u.c;h=6f5fc94d9a3e87bf68e3a5cdd2149579ac855720;hb=870ea00243b1290541334bec2a56428c9f68dba6;hp=059637e592d2b8f6d9b5bff8617be961ca919c0f;hpb=692dec0cfb4bcf2ddcb6438cfbe73d585c7a3bbc;p=fio.git

diff --git a/io_u.c b/io_u.c
index 059637e5..6f5fc94d 100644
--- a/io_u.c
+++ b/io_u.c
@@ -355,7 +355,7 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
 	 * and invalidate the cache, if we need to.
 	 */
 	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
-	    o->time_based) {
+	    o->time_based && o->nr_files == 1) {
 		f->last_pos[ddir] = f->file_offset;
 		loop_cache_invalidate(td, f);
 	}
@@ -417,7 +417,13 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,
 
 	b = offset = -1ULL;
 
-	if (rw_seq) {
+	if (td_randtrimwrite(td) && ddir == DDIR_WRITE) {
+		/* don't mark randommap for these writes */
+		io_u_set(td, io_u, IO_U_F_BUSY_OK);
+		offset = f->last_start[DDIR_TRIM];
+		*is_random = true;
+		ret = 0;
+	} else if (rw_seq) {
 		if (td_random(td)) {
 			if (should_do_random(td, ddir)) {
 				ret = get_next_rand_block(td, f, ddir, &b);
@@ -507,6 +513,24 @@ static int get_next_offset(struct thread_data *td, struct io_u *io_u,
 		return 1;
 	}
 
+	/*
+	 * For randtrimwrite, we decide whether to issue a trim or a write
+	 * based on whether the offsets for the most recent trim and write
+	 * operations match. If they don't match that means we just issued a
+	 * new trim and the next operation should be a write. If they *do*
+	 * match that means we just completed a trim+write pair and the next
+	 * command should be a trim.
+	 *
+	 * This works fine for sequential workloads but for random workloads
+	 * it's possible to complete a trim+write pair and then have the next
+	 * randomly generated offset match the previous offset. If that happens
+	 * we need to alter the offset for the last write operation in order
+	 * to ensure that we issue a write operation the next time through.
+	 */
+	if (td_randtrimwrite(td) && ddir == DDIR_TRIM &&
+	    f->last_start[DDIR_TRIM] == io_u->offset)
+		f->last_start[DDIR_WRITE]--;
+
 	io_u->verify_offset = io_u->offset;
 	return 0;
 }
@@ -530,6 +554,12 @@ static unsigned long long get_next_buflen(struct thread_data *td, struct io_u *i
 
 	assert(ddir_rw(ddir));
 
+	if (td_randtrimwrite(td) && ddir == DDIR_WRITE) {
+		struct fio_file *f = io_u->file;
+
+		return f->last_pos[DDIR_TRIM] - f->last_start[DDIR_TRIM];
+	}
+
 	if (td->o.bs_is_seq_rand)
 		ddir = is_random ? DDIR_WRITE : DDIR_READ;
 
@@ -755,7 +785,15 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
 	else
 		ddir = DDIR_INVAL;
 
-	td->rwmix_ddir = rate_ddir(td, ddir);
+	if (!should_check_rate(td)) {
+		/*
+		 * avoid time-consuming call to utime_since_now() if rate checking
+		 * isn't being used. this improves IOPS by 50%. See:
+		 * https://github.com/axboe/fio/issues/1501#issuecomment-1418327049
+		 */
+		td->rwmix_ddir = ddir;
+	} else
+		td->rwmix_ddir = rate_ddir(td, ddir);
 
 	return td->rwmix_ddir;
 }
@@ -768,7 +806,7 @@ static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
 	if (td_trimwrite(td)) {
 		struct fio_file *f = io_u->file;
 
-		if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
+		if (f->last_start[DDIR_WRITE] == f->last_start[DDIR_TRIM])
 			ddir = DDIR_TRIM;
 		else
 			ddir = DDIR_WRITE;
@@ -946,10 +984,15 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 	offset = io_u->offset;
 	if (td->o.zone_mode == ZONE_MODE_ZBD) {
 		ret = zbd_adjust_block(td, io_u);
-		if (ret == io_u_eof)
+		if (ret == io_u_eof) {
+			dprint(FD_IO, "zbd_adjust_block() returned io_u_eof\n");
 			return 1;
+		}
 	}
 
+	if (td->o.fdp)
+		fdp_fill_dspec_data(td, io_u);
+
 	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
 		dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
 			io_u,
@@ -993,7 +1036,7 @@ static void __io_u_mark_map(uint64_t *map, unsigned int nr)
 		break;
 	case 1 ... 4:
 		idx = 1;
-		fallthrough;
+		fio_fallthrough;
 	case 0:
 		break;
 	}
@@ -1035,7 +1078,7 @@ void io_u_mark_depth(struct thread_data *td, unsigned int nr)
 		break;
 	case 2 ... 3:
 		idx = 1;
-		fallthrough;
+		fio_fallthrough;
 	case 1:
 		break;
 	}
@@ -1076,7 +1119,7 @@ static void io_u_mark_lat_nsec(struct thread_data *td, unsigned long long nsec)
 		break;
 	case 2 ... 3:
 		idx = 1;
-		fallthrough;
+		fio_fallthrough;
 	case 0 ... 1:
 		break;
 	}
@@ -1118,7 +1161,7 @@ static void io_u_mark_lat_usec(struct thread_data *td, unsigned long long usec)
 		break;
 	case 2 ... 3:
 		idx = 1;
-		fallthrough;
+		fio_fallthrough;
 	case 0 ... 1:
 		break;
 	}
@@ -1166,7 +1209,7 @@ static void io_u_mark_lat_msec(struct thread_data *td, unsigned long long msec)
 		break;
 	case 2 ... 3:
 		idx = 1;
-		fallthrough;
+		fio_fallthrough;
 	case 0 ... 1:
 		break;
 	}
@@ -1327,8 +1370,8 @@ static struct fio_file *__get_next_file(struct thread_data *td)
 		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
 			goto out;
 		if (td->file_service_left) {
-			td->file_service_left--;
-			goto out;
+		        td->file_service_left--;
+		        goto out;
 		}
 	}
 
@@ -1966,7 +2009,7 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 	dprint_io_u(io_u, "complete");
 
 	assert(io_u->flags & IO_U_F_FLIGHT);
-	io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
+	io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK | IO_U_F_PATTERN_DONE);
 
 	/*
 	 * Mark IO ok to verify
@@ -1984,6 +2027,8 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 	}
 
 	if (ddir_sync(ddir)) {
+		if (io_u->error)
+			goto error;
 		td->last_was_sync = true;
 		if (f) {
 			f->first_write = -1ULL;
@@ -2039,6 +2084,7 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 			icd->error = ret;
 		}
 	} else if (io_u->error) {
+error:
 		icd->error = io_u->error;
 		io_u_log_error(td, io_u);
 	}
@@ -2091,13 +2137,26 @@ static void ios_completed(struct thread_data *td,
 	}
 }
 
+static void io_u_update_bytes_done(struct thread_data *td,
+				   struct io_completion_data *icd)
+{
+	int ddir;
+
+	if (td->runstate == TD_VERIFYING) {
+		td->bytes_verified += icd->bytes_done[DDIR_READ];
+		return;
+	}
+
+	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
+		td->bytes_done[ddir] += icd->bytes_done[ddir];
+}
+
 /*
  * Complete a single io_u for the sync engines.
  */
 int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
 {
 	struct io_completion_data icd;
-	int ddir;
 
 	init_icd(td, &icd, 1);
 	io_completed(td, &io_u, &icd);
@@ -2110,8 +2169,7 @@ int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
 		return -1;
 	}
 
-	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
-		td->bytes_done[ddir] += icd.bytes_done[ddir];
+	io_u_update_bytes_done(td, &icd);
 
 	return 0;
 }
@@ -2123,7 +2181,7 @@ int io_u_queued_complete(struct thread_data *td, int min_evts)
 {
 	struct io_completion_data icd;
 	struct timespec *tvp = NULL;
-	int ret, ddir;
+	int ret;
 	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
 
 	dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);
@@ -2149,8 +2207,7 @@ int io_u_queued_complete(struct thread_data *td, int min_evts)
 		return -1;
 	}
 
-	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
-		td->bytes_done[ddir] += icd.bytes_done[ddir];
+	io_u_update_bytes_done(td, &icd);
 
 	return ret;
 }
@@ -2297,7 +2354,11 @@ int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
 	int ret;
 
 	if (io_u->ddir == DDIR_SYNC) {
+#ifdef CONFIG_FCNTL_SYNC
+		ret = fcntl(io_u->file->fd, F_FULLFSYNC);
+#else
 		ret = fsync(io_u->file->fd);
+#endif
 	} else if (io_u->ddir == DDIR_DATASYNC) {
 #ifdef CONFIG_FDATASYNC
 		ret = fdatasync(io_u->file->fd);
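
The DDIR_SYNC hunk above gates the new behaviour on CONFIG_FCNTL_SYNC, which fio's configure step presumably defines where fcntl(F_FULLFSYNC) is available (notably Apple platforms, where a plain fsync() does not force the device's volatile write cache to be flushed). A minimal standalone sketch of the same pattern, keyed directly off the F_FULLFSYNC macro from <fcntl.h> rather than fio's configure machinery:

/*
 * Sketch, not fio code: issue the strongest flush available for fd.
 * F_FULLFSYNC (Apple) also asks the device to flush its write cache;
 * everywhere else, fall back to a plain fsync().
 */
#include <fcntl.h>
#include <unistd.h>

static int full_sync(int fd)
{
#ifdef F_FULLFSYNC
	return fcntl(fd, F_FULLFSYNC);
#else
	return fsync(fd);
#endif
}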
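
The trimwrite changes (set_rw_ddir(), get_next_offset(), get_next_buflen()) all rely on one invariant: a trim and the write paired with it start at the same offset, so the direction of the next I/O follows from comparing the last trim and write start offsets. A simplified model of that decision, using hypothetical names in place of fio's fio_file state:

/*
 * Simplified model, not fio code: equal start offsets mean the previous
 * trim+write pair completed, so trim next; unequal offsets mean a trim
 * was just issued and still needs its paired write.
 */
#include <stdint.h>

enum tw_dir { TW_TRIM, TW_WRITE };

struct tw_state {
	uint64_t last_start[2];		/* indexed by enum tw_dir */
};

static enum tw_dir next_trimwrite_dir(const struct tw_state *s)
{
	if (s->last_start[TW_WRITE] == s->last_start[TW_TRIM])
		return TW_TRIM;
	return TW_WRITE;
}

The get_next_offset() hunk handles the random corner case: if a freshly generated trim offset happens to equal the previous pair's start, the recorded write start is decremented so the comparison above still yields a write on the next pass.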