#include "fio.h"
#include "hash.h"
#include "verify.h"
+#include "lib/rand.h"
struct io_completion_data {
int nr; /* input */
* If we have a mixed random workload, we may
* encounter blocks we already did IO to.
*/
- if ((td->o.ddir_nr == 1) && !random_map_free(f, block)) {
- if (!blocks)
- blocks = 1;
+ if ((td->o.ddir_nr == 1) && !random_map_free(f, block))
break;
- }
idx = RAND_MAP_IDX(f, block);
bit = RAND_MAP_BIT(f, block);
if (this_blocks + bit > BLOCKS_PER_MAP)
this_blocks = BLOCKS_PER_MAP - bit;
- if (this_blocks == BLOCKS_PER_MAP)
- mask = -1U;
- else
- mask = ((1U << this_blocks) - 1) << bit;
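+ /*
+ * The mask may overlap blocks we already wrote. Shrink this_blocks
+ * until the mask covers only bits still clear in the file map, so
+ * no block is marked twice.
+ */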
+ do {
+ if (this_blocks == BLOCKS_PER_MAP)
+ mask = -1U;
+ else
+ mask = ((1U << this_blocks) - 1) << bit;
+
+ if (!(f->file_map[idx] & mask))
+ break;
+
+ this_blocks--;
+ } while (this_blocks);
+
+ if (!this_blocks)
+ break;
f->file_map[idx] |= mask;
nr_blocks -= this_blocks;
* until we find a free one. For sequential io, just return the end of
* the last io issued.
*/
-static int get_next_offset(struct thread_data *td, struct io_u *io_u)
+static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
{
struct fio_file *f = io_u->file;
unsigned long long b;
return 0;
}
-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
+static int get_next_offset(struct thread_data *td, struct io_u *io_u)
+{
+ struct prof_io_ops *ops = &td->prof_io_ops;
+
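+ /* a loaded I/O profile may hook offset generation */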
+ if (ops->fill_io_u_off)
+ return ops->fill_io_u_off(td, io_u);
+
+ return __get_next_offset(td, io_u);
+}
+
+static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
const int ddir = io_u->ddir;
unsigned int uninitialized_var(buflen);
return buflen;
}
+static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
+{
+ struct prof_io_ops *ops = &td->prof_io_ops;
+
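+ /* profiles may also override block size selection */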
+ if (ops->fill_io_u_size)
+ return ops->fill_io_u_size(td, io_u);
+
+ return __get_next_buflen(td, io_u);
+}
+
static void set_rwmix_bytes(struct thread_data *td)
{
unsigned int diff;
odir = ddir ^ 1;
if (td_rw(td) && __should_check_rate(td, odir))
td->rate_pending_usleep[odir] -= usec;
-
+
return ddir;
}
td->io_issues[DDIR_WRITE] && should_fsync(td))
return DDIR_DATASYNC;
+ /*
+ * see if it's time to sync_file_range
+ */
+ if (td->sync_file_range_nr &&
+ !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
+ td->io_issues[DDIR_WRITE] && should_fsync(td))
+ return DDIR_SYNC_FILE_RANGE;
+
if (td_rw(td)) {
/*
* Check if it's time to seed a new data direction.
{
td_io_u_lock(td);
- assert((io_u->flags & IO_U_F_FREE) == 0);
io_u->flags |= IO_U_F_FREE;
io_u->flags &= ~IO_U_F_FREE_DEF;
put_file_log(td, io_u->file);
io_u->file = NULL;
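+ /* only io_us accounted into cur_depth may decrement it */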
+ if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
+ td->cur_depth--;
flist_del_init(&io_u->list);
flist_add(&io_u->list, &td->io_u_freelist);
- td->cur_depth--;
td_io_u_unlock(td);
td_io_u_free_notify(td);
}
td->io_issues[__io_u->ddir]--;
__io_u->flags &= ~IO_U_F_FLIGHT;
-
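+ /* as in put_io_u(), only depth-accounted io_us drop cur_depth */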
+ if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
+ td->cur_depth--;
flist_del(&__io_u->list);
flist_add_tail(&__io_u->list, &td->io_u_requeues);
- td->cur_depth--;
td_io_u_unlock(td);
*io_u = NULL;
}
opened = 1;
}
- dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf, f->flags);
+ dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
+ f->flags);
if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
break;
return f;
}
-static struct fio_file *get_next_file(struct thread_data *td)
+static struct fio_file *__get_next_file(struct thread_data *td)
{
struct fio_file *f;
return f;
}
+static struct fio_file *get_next_file(struct thread_data *td)
+{
+ struct prof_io_ops *ops = &td->prof_io_ops;
+
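+ /* file selection, too, may be hooked by a profile */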
+ if (ops->get_next_file)
+ return ops->get_next_file(td);
+
+ return __get_next_file(td);
+}
+
static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
struct fio_file *f;
io_u->file = NULL;
fio_file_set_done(f);
td->nr_done_files++;
- dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, td->nr_done_files, td->o.nr_files);
+ dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
+ td->nr_done_files, td->o.nr_files);
} while (1);
return 0;
io_u->end_io = NULL;
}
- /*
- * We ran out, wait for async verify threads to finish and return one
- */
- if (!io_u && td->o.verify_async) {
- pthread_cond_wait(&td->free_cond, &td->io_u_lock);
- goto again;
- }
-
if (io_u) {
assert(io_u->flags & IO_U_F_FREE);
- io_u->flags &= ~IO_U_F_FREE;
- io_u->flags &= ~IO_U_F_FREE_DEF;
+ io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
io_u->error = 0;
flist_del(&io_u->list);
flist_add(&io_u->list, &td->io_u_busylist);
td->cur_depth++;
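+ /* mark it depth-accounted, so put/requeue decrement cur_depth */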
+ io_u->flags |= IO_U_F_IN_CUR_DEPTH;
+ } else if (td->o.verify_async) {
+ /*
+ * We ran out; wait for the async verify threads to finish and
+ * return one.
+ */
+ pthread_cond_wait(&td->free_cond, &td->io_u_lock);
+ goto again;
}
td_io_u_unlock(td);
return NULL;
}
+ if (td->o.verify_backlog && td->io_hist_len) {
+ int get_verify = 0;
+
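+ /*
+ * Either continue draining the current verify batch, or start
+ * a new one every verify_backlog writes.
+ */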
+ if (td->verify_batch) {
+ td->verify_batch--;
+ get_verify = 1;
+ } else if (!(td->io_hist_len % td->o.verify_backlog) &&
+ td->last_ddir != DDIR_READ) {
+ td->verify_batch = td->o.verify_batch;
+ if (!td->verify_batch)
+ td->verify_batch = td->o.verify_backlog;
+ get_verify = 1;
+ }
+
+ if (get_verify && !get_next_verify(td, io_u))
+ goto out;
+ }
+
/*
* from a requeue, io_u already setup
*/
* initialized, silence that warning.
*/
unsigned long uninitialized_var(usec);
+ struct fio_file *f;
dprint_io_u(io_u, "io complete");
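+ /*
+ * io_u flags are also inspected by the async verify threads,
+ * so flip them under the io_u lock
+ */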
+ td_io_u_lock(td);
assert(io_u->flags & IO_U_F_FLIGHT);
io_u->flags &= ~IO_U_F_FLIGHT;
+ td_io_u_unlock(td);
if (ddir_sync(io_u->ddir)) {
td->last_was_sync = 1;
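+ /* the sync is done, reset the tracked write range */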
+ f = io_u->file;
+ if (f) {
+ f->first_write = -1ULL;
+ f->last_write = -1ULL;
+ }
return;
}
td->last_was_sync = 0;
+ td->last_ddir = io_u->ddir;
if (!io_u->error) {
unsigned int bytes = io_u->buflen - io_u->resid;
const enum fio_ddir idx = io_u->ddir;
+ const enum fio_ddir odx = io_u->ddir ^ 1;
int ret;
td->io_blocks[idx]++;
td->io_bytes[idx] += bytes;
td->this_io_bytes[idx] += bytes;
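+ /*
+ * remember the lowest and highest offset written, so that
+ * sync_file_range() can later cover just the dirty range
+ */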
+ if (idx == DDIR_WRITE) {
+ f = io_u->file;
+ if (f) {
+ if (f->first_write == -1ULL ||
+ io_u->offset < f->first_write)
+ f->first_write = io_u->offset;
+ if (f->last_write == -1ULL ||
+ ((io_u->offset + bytes) > f->last_write))
+ f->last_write = io_u->offset + bytes;
+ }
+ }
+
if (ramp_time_over(td)) {
unsigned long uninitialized_var(lusec);
- unsigned long uninitialized_var(rusec);
if (!td->o.disable_clat || !td->o.disable_bw)
lusec = utime_since(&io_u->issue_time,
&icd->time);
- if (__should_check_rate(td, idx) ||
- __should_check_rate(td, idx ^ 1))
- rusec = utime_since(&io_u->start_time,
- &icd->time);
if (!td->o.disable_clat) {
add_clat_sample(td, idx, lusec, bytes);
if (!td->o.disable_bw)
add_bw_sample(td, idx, bytes, &icd->time);
if (__should_check_rate(td, idx)) {
- td->rate_pending_usleep[idx] +=
- (long) td->rate_usec_cycle[idx] - rusec;
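+ /*
+ * base the sleep on total bytes done at the target rate
+ * minus the time already spent, instead of adding up
+ * per-IO deltas
+ */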
+ td->rate_pending_usleep[idx] =
+ ((td->this_io_bytes[idx] *
+ td->rate_nsec_cycle[idx]) / 1000 -
+ utime_since_now(&td->start));
}
if (__should_check_rate(td, idx ^ 1))
- td->rate_pending_usleep[idx ^ 1] -= rusec;
+ td->rate_pending_usleep[odx] =
+ ((td->this_io_bytes[odx] *
+ td->rate_nsec_cycle[odx]) / 1000 -
+ utime_since_now(&td->start));
}
if (td_write(td) && idx == DDIR_WRITE &&
long *ptr = io_u->buf;
if (!td->o.zero_buffers) {
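+ /*
+ * seed once from the random state; if a long is wider than
+ * the 32-bit random value, multiply in a second one to fill
+ * the upper bits. each word is then scrambled with a cheap
+ * golden ratio multiply instead of a rand() call per word.
+ */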
+ unsigned long r = __rand(&__fio_rand_state);
+
+ if (sizeof(int) != sizeof(*ptr))
+ r *= (unsigned long) __rand(&__fio_rand_state);
+
while ((void *) ptr - io_u->buf < max_bs) {
- *ptr = rand() * GOLDEN_RATIO_PRIME;
+ *ptr = r;
ptr++;
+ r *= GOLDEN_RATIO_PRIME;
+ r >>= 3;
}
} else
memset(ptr, 0, max_bs);