X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=io_u.c;h=eb617e649c1df82dba42259361cd36f3a32e7796;hb=615c794cbf851c994e94fffe8b8f565e64f137a5;hp=059637e592d2b8f6d9b5bff8617be961ca919c0f;hpb=d04ed5d3764df8ef793478f17a424613aa506a39;p=fio.git

diff --git a/io_u.c b/io_u.c
index 059637e5..eb617e64 100644
--- a/io_u.c
+++ b/io_u.c
@@ -355,7 +355,7 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
 	 * and invalidate the cache, if we need to.
 	 */
 	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
-	    o->time_based) {
+	    o->time_based && o->nr_files == 1) {
 		f->last_pos[ddir] = f->file_offset;
 		loop_cache_invalidate(td, f);
 	}
@@ -417,7 +417,13 @@ static int get_next_block(struct thread_data *td, struct io_u *io_u,
 
 	b = offset = -1ULL;
 
-	if (rw_seq) {
+	if (td_randtrimwrite(td) && ddir == DDIR_WRITE) {
+		/* don't mark randommap for these writes */
+		io_u_set(td, io_u, IO_U_F_BUSY_OK);
+		offset = f->last_start[DDIR_TRIM];
+		*is_random = true;
+		ret = 0;
+	} else if (rw_seq) {
 		if (td_random(td)) {
 			if (should_do_random(td, ddir)) {
 				ret = get_next_rand_block(td, f, ddir, &b);
@@ -507,6 +513,24 @@ static int get_next_offset(struct thread_data *td, struct io_u *io_u,
 		return 1;
 	}
 
+	/*
+	 * For randtrimwrite, we decide whether to issue a trim or a write
+	 * based on whether the offsets for the most recent trim and write
+	 * operations match. If they don't match that means we just issued a
+	 * new trim and the next operation should be a write. If they *do*
+	 * match that means we just completed a trim+write pair and the next
+	 * command should be a trim.
+	 *
+	 * This works fine for sequential workloads but for random workloads
+	 * it's possible to complete a trim+write pair and then have the next
+	 * randomly generated offset match the previous offset. If that happens
+	 * we need to alter the offset for the last write operation in order
+	 * to ensure that we issue a write operation the next time through.
+	 */
+	if (td_randtrimwrite(td) && ddir == DDIR_TRIM &&
+	    f->last_start[DDIR_TRIM] == io_u->offset)
+		f->last_start[DDIR_WRITE]--;
+
 	io_u->verify_offset = io_u->offset;
 	return 0;
 }
@@ -530,6 +554,12 @@ static unsigned long long get_next_buflen(struct thread_data *td, struct io_u *i
 
 	assert(ddir_rw(ddir));
 
+	if (td_randtrimwrite(td) && ddir == DDIR_WRITE) {
+		struct fio_file *f = io_u->file;
+
+		return f->last_pos[DDIR_TRIM] - f->last_start[DDIR_TRIM];
+	}
+
 	if (td->o.bs_is_seq_rand)
 		ddir = is_random ? DDIR_WRITE : DDIR_READ;
 
@@ -755,7 +785,15 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
 	else
 		ddir = DDIR_INVAL;
 
-	td->rwmix_ddir = rate_ddir(td, ddir);
+	if (!should_check_rate(td)) {
+		/*
+		 * avoid time-consuming call to utime_since_now() if rate checking
+		 * isn't being used. this improves IOPS by 50%. See:
+		 * https://github.com/axboe/fio/issues/1501#issuecomment-1418327049
+		 */
+		td->rwmix_ddir = ddir;
+	} else
+		td->rwmix_ddir = rate_ddir(td, ddir);
 
 	return td->rwmix_ddir;
 }
@@ -768,7 +806,7 @@ static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
 	if (td_trimwrite(td)) {
 		struct fio_file *f = io_u->file;
 
-		if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
+		if (f->last_start[DDIR_WRITE] == f->last_start[DDIR_TRIM])
 			ddir = DDIR_TRIM;
 		else
 			ddir = DDIR_WRITE;
@@ -993,7 +1031,7 @@ static void __io_u_mark_map(uint64_t *map, unsigned int nr)
 		break;
 	case 1 ... 4:
 		idx = 1;
-		fallthrough;
+		fio_fallthrough;
 	case 0:
 		break;
 	}
@@ -1035,7 +1073,7 @@ void io_u_mark_depth(struct thread_data *td, unsigned int nr)
 		break;
 	case 2 ... 3:
 		idx = 1;
-		fallthrough;
+		fio_fallthrough;
 	case 1:
 		break;
 	}
@@ -1076,7 +1114,7 @@ static void io_u_mark_lat_nsec(struct thread_data *td, unsigned long long nsec)
 		break;
 	case 2 ... 3:
 		idx = 1;
-		fallthrough;
+		fio_fallthrough;
 	case 0 ... 1:
 		break;
 	}
@@ -1118,7 +1156,7 @@ static void io_u_mark_lat_usec(struct thread_data *td, unsigned long long usec)
 		break;
 	case 2 ... 3:
 		idx = 1;
-		fallthrough;
+		fio_fallthrough;
 	case 0 ... 1:
 		break;
 	}
@@ -1166,7 +1204,7 @@ static void io_u_mark_lat_msec(struct thread_data *td, unsigned long long msec)
 		break;
 	case 2 ... 3:
 		idx = 1;
-		fallthrough;
+		fio_fallthrough;
 	case 0 ... 1:
 		break;
 	}
@@ -2091,13 +2129,26 @@ static void ios_completed(struct thread_data *td,
 	}
 }
 
+static void io_u_update_bytes_done(struct thread_data *td,
+				   struct io_completion_data *icd)
+{
+	int ddir;
+
+	if (td->runstate == TD_VERIFYING) {
+		td->bytes_verified += icd->bytes_done[DDIR_READ];
+		return;
+	}
+
+	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
+		td->bytes_done[ddir] += icd->bytes_done[ddir];
+}
+
 /*
  * Complete a single io_u for the sync engines.
  */
 int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
 {
 	struct io_completion_data icd;
-	int ddir;
 
 	init_icd(td, &icd, 1);
 	io_completed(td, &io_u, &icd);
@@ -2110,8 +2161,7 @@ int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
 		return -1;
 	}
 
-	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
-		td->bytes_done[ddir] += icd.bytes_done[ddir];
+	io_u_update_bytes_done(td, &icd);
 
 	return 0;
 }
@@ -2123,7 +2173,7 @@ int io_u_queued_complete(struct thread_data *td, int min_evts)
 {
 	struct io_completion_data icd;
 	struct timespec *tvp = NULL;
-	int ret, ddir;
+	int ret;
 	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
 
 	dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);
@@ -2149,8 +2199,7 @@ int io_u_queued_complete(struct thread_data *td, int min_evts)
 		return -1;
 	}
 
-	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
-		td->bytes_done[ddir] += icd.bytes_done[ddir];
+	io_u_update_bytes_done(td, &icd);
 
 	return ret;
 }
@@ -2297,7 +2346,11 @@ int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
 	int ret;
 
 	if (io_u->ddir == DDIR_SYNC) {
+#ifdef CONFIG_FCNTL_SYNC
+		ret = fcntl(io_u->file->fd, F_FULLFSYNC);
+#else
 		ret = fsync(io_u->file->fd);
+#endif
 	} else if (io_u->ddir == DDIR_DATASYNC) {
 #ifdef CONFIG_FDATASYNC
 		ret = fdatasync(io_u->file->fd);
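
The randtrimwrite hunks above cooperate across several functions: set_rw_ddir() picks trim or write by comparing f->last_start[DDIR_WRITE] with f->last_start[DDIR_TRIM], get_next_block() and get_next_offset() make the write reuse the trim's offset, and get_next_buflen() makes it reuse the trim's length. A minimal standalone sketch of that pairing state machine follows; the types and names are simplified stand-ins, not fio's real thread_data/fio_file bookkeeping:

#include <stdio.h>
#include <stdint.h>

enum ddir { DDIR_TRIM, DDIR_WRITE };

struct file_state {
	/* start offset of the most recent operation in each direction */
	uint64_t last_start[2];
};

/*
 * Matching offsets mean the previous trim+write pair completed, so a new
 * pair starts with a trim; otherwise a trim is still unpaired and the
 * next operation must be the write that covers it.
 */
static enum ddir next_trimwrite_ddir(const struct file_state *f)
{
	if (f->last_start[DDIR_WRITE] == f->last_start[DDIR_TRIM])
		return DDIR_TRIM;
	return DDIR_WRITE;
}

int main(void)
{
	struct file_state f = { .last_start = { 0, 0 } };
	/* hypothetical "random" trim offsets; the second repeats the first */
	uint64_t trim_offsets[] = { 4096, 4096, 8192 };
	size_t next_trim = 0;
	int step;

	for (step = 0; step < 6; step++) {
		if (next_trimwrite_ddir(&f) == DDIR_TRIM) {
			uint64_t off = trim_offsets[next_trim++];

			/*
			 * The get_next_offset() workaround from the diff: if a
			 * fresh random trim lands on the previous pair's
			 * offset, nudge the write bookkeeping so the pair
			 * still looks open and the next operation is a write.
			 */
			if (f->last_start[DDIR_TRIM] == off)
				f->last_start[DDIR_WRITE]--;
			f->last_start[DDIR_TRIM] = off;
			printf("trim  at %llu\n", (unsigned long long)off);
		} else {
			/* the write reuses the trim's start offset (and, in
			 * fio, its length via get_next_buflen()) */
			f->last_start[DDIR_WRITE] = f->last_start[DDIR_TRIM];
			printf("write at %llu\n",
			       (unsigned long long)f->last_start[DDIR_WRITE]);
		}
	}
	return 0;
}

Run as-is this prints strictly alternating trim/write lines. Without the last_start[DDIR_WRITE]-- nudge, the repeated 4096 offset would make the two directions match immediately after the trim, and the write half of that pair would be skipped.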
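The do_io_u_sync() hunk prefers fcntl(F_FULLFSYNC) over fsync() when fio's configure script defines CONFIG_FCNTL_SYNC. On macOS, fsync() only forces data to the drive, while F_FULLFSYNC also asks the drive to flush its own write cache. A minimal sketch of the same pattern, using a plain #ifdef feature test in place of fio's configure probe; the "testfile" path is illustrative only:

#include <fcntl.h>
#include <unistd.h>

/* Flush fd as far toward stable media as the platform allows. */
static int full_sync(int fd)
{
#ifdef F_FULLFSYNC
	/* macOS: also flush the drive's write cache; returns -1 on error */
	return fcntl(fd, F_FULLFSYNC);
#else
	return fsync(fd);
#endif
}

int main(void)
{
	/* "testfile" is an illustrative path, not something fio creates */
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	if (full_sync(fd) < 0) {
		close(fd);
		return 1;
	}
	return close(fd) != 0;
}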