X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=io_u.c;h=92b707647cc17bf40805f6b7055a675374bf8b0b;hb=091708da23c98d1d8769ede36dd30768ccb85086;hp=c33dddf66f7c1337ed9157ffe5ad52628703945d;hpb=c9dd34b291ad747804040cb2bb60127e6afcc3fa;p=fio.git

diff --git a/io_u.c b/io_u.c
index c33dddf6..92b70764 100644
--- a/io_u.c
+++ b/io_u.c
@@ -57,8 +57,11 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
 		 * If we have a mixed random workload, we may
 		 * encounter blocks we already did IO to.
 		 */
-		if ((td->o.ddir_nr == 1) && !random_map_free(f, block))
+		if ((td->o.ddir_nr == 1) && !random_map_free(f, block)) {
+			if (!blocks)
+				blocks = 1;
 			break;
+		}
 
 		idx = RAND_MAP_IDX(f, block);
 		bit = RAND_MAP_BIT(f, block);
@@ -143,7 +146,7 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
 	r = os_random_long(&td->random_state);
 	dprint(FD_RANDOM, "off rand %llu\n", r);
 	*b = (last_block(td, f, ddir) - 1)
-		* (r / ((unsigned long long) RAND_MAX + 1.0));
+		* (r / ((unsigned long long) OS_RAND_MAX + 1.0));
 
 	/*
 	 * if we are not maintaining a random map, we are done.
@@ -168,7 +171,8 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
 	 */
 	loops = 10;
 	do {
-		f->last_free_lookup = (f->num_maps - 1) * (r / (RAND_MAX+1.0));
+		f->last_free_lookup = (f->num_maps - 1) *
+					(r / (OS_RAND_MAX + 1.0));
 		if (!get_next_free_block(td, f, ddir, b))
 			return 0;
 
@@ -237,7 +241,7 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
 		if (!td->o.bssplit_nr) {
 			buflen = (unsigned int)
 					(1 + (double) (td->o.max_bs[ddir] - 1)
-					* r / (RAND_MAX + 1.0));
+					* r / (OS_RAND_MAX + 1.0));
 		} else {
 			long perc = 0;
 			unsigned int i;
@@ -285,7 +289,7 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 	long r;
 
 	r = os_random_long(&td->rwmix_state);
-	v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
+	v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
 	if (v <= td->o.rwmix[DDIR_READ])
 		return DDIR_READ;
 
@@ -316,8 +320,10 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
 			max_bytes = td->this_io_bytes[ddir];
 			if (max_bytes >=
 			    (td->o.size * td->o.rwmix[ddir] / 100)) {
-				if (!td->rw_end_set[ddir])
+				if (!td->rw_end_set[ddir]) {
 					td->rw_end_set[ddir] = 1;
+					fio_gettime(&td->rw_end[ddir], NULL);
+				}
 				ddir ^= 1;
 			}
 
@@ -351,8 +357,8 @@ void put_io_u(struct thread_data *td, struct io_u *io_u)
 		put_file_log(td, io_u->file);
 
 	io_u->file = NULL;
-	list_del(&io_u->list);
-	list_add(&io_u->list, &td->io_u_freelist);
+	flist_del(&io_u->list);
+	flist_add(&io_u->list, &td->io_u_freelist);
 	td->cur_depth--;
 }
 
@@ -368,8 +374,8 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u)
 
 	__io_u->flags &= ~IO_U_F_FLIGHT;
 
-	list_del(&__io_u->list);
-	list_add_tail(&__io_u->list, &td->io_u_requeues);
+	flist_del(&__io_u->list);
+	flist_add_tail(&__io_u->list, &td->io_u_requeues);
 	td->cur_depth--;
 	*io_u = NULL;
 }
@@ -615,7 +621,7 @@ static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf,
 		long r = os_random_long(&td->next_file_state);
 
 		fno = (unsigned int) ((double) td->o.nr_files
-			* (r / (RAND_MAX + 1.0)));
+			* (r / (OS_RAND_MAX + 1.0)));
 		f = td->files[fno];
 		if (f->flags & FIO_FILE_DONE)
 			continue;
@@ -761,10 +767,10 @@ struct io_u *__get_io_u(struct thread_data *td)
 {
 	struct io_u *io_u = NULL;
 
-	if (!list_empty(&td->io_u_requeues))
-		io_u = list_entry(td->io_u_requeues.next, struct io_u, list);
+	if (!flist_empty(&td->io_u_requeues))
+		io_u = flist_entry(td->io_u_requeues.next, struct io_u, list);
 	else if (!queue_full(td)) {
-		io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
+		io_u = flist_entry(td->io_u_freelist.next, struct io_u, list);
 
 		io_u->buflen = 0;
 		io_u->resid = 0;
@@ -777,8 +783,8 @@ struct io_u *__get_io_u(struct thread_data *td)
 		io_u->flags &= ~IO_U_F_FREE;
 
 		io_u->error = 0;
-		list_del(&io_u->list);
-		list_add(&io_u->list, &td->io_u_busylist);
+		flist_del(&io_u->list);
+		flist_add(&io_u->list, &td->io_u_busylist);
 		td->cur_depth++;
 	}
 
@@ -965,19 +971,19 @@ long io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
 /*
  * Called to complete min_events number of io for the async engines.
  */
-long io_u_queued_complete(struct thread_data *td, int min_events)
+long io_u_queued_complete(struct thread_data *td, int min_evts)
 {
 	struct io_completion_data icd;
 	struct timespec *tvp = NULL;
 	int ret;
 	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
 
-	dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_events);
+	dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
 
-	if (!min_events)
+	if (!min_evts)
 		tvp = &ts;
 
-	ret = td_io_getevents(td, min_events, td->cur_depth, tvp);
+	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
 	if (ret < 0) {
 		td_verror(td, -ret, "td_io_getevents");
 		return ret;
@@ -1057,7 +1063,7 @@ static void io_u_timeout_handler(int fio_unused sig)
 {
 	struct thread_data *td, *__td;
 	pid_t pid = getpid();
-	struct list_head *entry;
+	struct flist_head *entry;
 	struct io_u *io_u;
 	int i;
 
@@ -1086,8 +1092,8 @@ static void io_u_timeout_handler(int fio_unused sig)
 	log_err("fio: io_u timeout: job=%s, pid=%d\n", td->o.name, td->pid);
 
-	list_for_each(entry, &td->io_u_busylist) {
-		io_u = list_entry(entry, struct io_u, list);
+	flist_for_each(entry, &td->io_u_busylist) {
+		io_u = flist_entry(entry, struct io_u, list);
 
 		io_u_dump(io_u);
 	}