X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=io_u.c;h=45e8bb81ccc6e4dcfd7bbab57c781824c2d63161;hp=baf5d474dcee5b10f78a457f61571839f90b4c23;hb=6040dabcaaf29ecea9c68735a4d3a6ad6411f573;hpb=10ba535a5cbb95b5576e33a6f8af093a6ca3bfd7

diff --git a/io_u.c b/io_u.c
index baf5d474..45e8bb81 100644
--- a/io_u.c
+++ b/io_u.c
@@ -21,6 +21,35 @@ static int random_map_free(struct thread_data *td, struct fio_file *f,
 	return (f->file_map[idx] & (1UL << bit)) == 0;
 }
 
+/*
+ * Mark a given offset as used in the map.
+ */
+static void mark_random_map(struct thread_data *td, struct fio_file *f,
+			    struct io_u *io_u)
+{
+	unsigned long long block = io_u->offset / (unsigned long long) td->min_bs;
+	unsigned int blocks = 0;
+
+	while (blocks < (io_u->buflen / td->min_bs)) {
+		unsigned int idx, bit;
+
+		if (!random_map_free(td, f, block))
+			break;
+
+		idx = RAND_MAP_IDX(td, f, block);
+		bit = RAND_MAP_BIT(td, f, block);
+
+		assert(idx < f->num_maps);
+
+		f->file_map[idx] |= (1UL << bit);
+		block++;
+		blocks++;
+	}
+
+	if ((blocks * td->min_bs) < io_u->buflen)
+		io_u->buflen = blocks * td->min_bs;
+}
+
 /*
  * Return the next free block in the map.
  */
@@ -143,35 +172,6 @@ static int get_rw_ddir(struct thread_data *td)
 	return DDIR_WRITE;
 }
 
-/*
- * Mark a given offset as used in the map.
- */
-static void mark_random_map(struct thread_data *td, struct fio_file *f,
-			    struct io_u *io_u)
-{
-	unsigned long long block = io_u->offset / (unsigned long long) td->min_bs;
-	unsigned int blocks = 0;
-
-	while (blocks < (io_u->buflen / td->min_bs)) {
-		unsigned int idx, bit;
-
-		if (!random_map_free(td, f, block))
-			break;
-
-		idx = RAND_MAP_IDX(td, f, block);
-		bit = RAND_MAP_BIT(td, f, block);
-
-		assert(idx < f->num_maps);
-
-		f->file_map[idx] |= (1UL << bit);
-		block++;
-		blocks++;
-	}
-
-	if ((blocks * td->min_bs) < io_u->buflen)
-		io_u->buflen = blocks * td->min_bs;
-}
-
 void put_io_u(struct thread_data *td, struct io_u *io_u)
 {
 	io_u->file = NULL;
@@ -189,6 +189,16 @@ static int fill_io_u(struct thread_data *td, struct fio_file *f,
 	if (td->read_iolog)
 		return read_iolog_get(td, io_u);
 
+	/*
+	 * see if it's time to sync
+	 */
+	if (td->fsync_blocks && !(td->io_blocks[DDIR_WRITE] % td->fsync_blocks)
+	    && should_fsync(td)) {
+		io_u->ddir = DDIR_SYNC;
+		io_u->file = f;
+		return 0;
+	}
+
 	/*
 	 * No log, let the seq/rand engine retrieve the next position.
 	 */
@@ -219,6 +229,7 @@ struct io_u *__get_io_u(struct thread_data *td)
 	if (!queue_full(td)) {
 		io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
 
+		io_u->buflen = 0;
 		io_u->error = 0;
 		io_u->resid = 0;
 		list_del(&io_u->list);
@@ -260,18 +271,20 @@ struct io_u *get_io_u(struct thread_data *td, struct fio_file *f)
 			io_u->buflen = f->file_size - io_u->offset;
 	}
 
-	if (!io_u->buflen) {
-		put_io_u(td, io_u);
-		return NULL;
-	}
+	if (io_u->ddir != DDIR_SYNC) {
+		if (!io_u->buflen) {
+			put_io_u(td, io_u);
+			return NULL;
+		}
 
-	if (!td->read_iolog && !td->sequential)
-		mark_random_map(td, f, io_u);
+		if (!td->read_iolog && !td->sequential)
+			mark_random_map(td, f, io_u);
 
-	f->last_pos += io_u->buflen;
+		f->last_pos += io_u->buflen;
 
-	if (td->verify != VERIFY_NONE)
-		populate_verify_io_u(td, io_u);
+		if (td->verify != VERIFY_NONE)
+			populate_verify_io_u(td, io_u);
+	}
 
 	if (td_io_prep(td, io_u)) {
 		put_io_u(td, io_u);
@@ -288,6 +301,13 @@ void io_completed(struct thread_data *td, struct io_u *io_u,
 	struct timeval e;
 	unsigned long msec;
 
+	if (io_u->ddir == DDIR_SYNC) {
+		td->last_was_sync = 1;
+		return;
+	}
+
+	td->last_was_sync = 0;
+
 	gettimeofday(&e, NULL);
 
 	if (!io_u->error) {