Signed-off-by: Jens Axboe <axboe@kernel.dk>
 	unsigned int sync_file_range;
 };
+enum {
+	/*
+	 * Bits cached in td->flags, mirroring thread_options fields that
+	 * are tested on the fast I/O path, so hot code checks one mask
+	 * instead of dereferencing the options struct each time.
+	 */
+	TD_F_VER_BACKLOG	= 1 << 0,
+	TD_F_TRIM_BACKLOG	= 1 << 1,
+	TD_F_READ_IOLOG		= 1 << 2,
+	TD_F_REFILL_BUFFERS	= 1 << 3,
+	TD_F_SCRAMBLE_BUFFERS	= 1 << 4,
+	/*
+	 * NOTE(review): this bit is SET when verify != VERIFY_NONE, so the
+	 * name reads inverted — confirm intent or consider renaming.
+	 */
+	TD_F_VER_NONE		= 1 << 5,
+	TD_F_PROFILE_OPS	= 1 << 6,
+};
+
 /*
  * This describes a single thread/process executing a fio job.
  */
 struct thread_data {
 	struct thread_options o;
 	void *eo;
 	char verror[FIO_VERROR_SIZE];
 	pthread_t thread;
+/*
+ * Seed td->flags from the job's thread_options. TD_F_PROFILE_OPS is not
+ * set here; it is set where the profile io ops are installed.
+ */
+static void init_flags(struct thread_data *td)
+{
+	struct thread_options *o = &td->o;
+
+	if (o->verify_backlog)
+		td->flags |= TD_F_VER_BACKLOG;
+	if (o->trim_backlog)
+		td->flags |= TD_F_TRIM_BACKLOG;
+	if (o->read_iolog_file)
+		td->flags |= TD_F_READ_IOLOG;
+	if (o->refill_buffers)
+		td->flags |= TD_F_REFILL_BUFFERS;
+	if (o->scramble_buffers)
+		td->flags |= TD_F_SCRAMBLE_BUFFERS;
+	/* NOTE(review): bit is set when verify is ENABLED, despite the name */
+	if (o->verify != VERIFY_NONE)
+		td->flags |= TD_F_VER_NONE;
+}
+
 /*
  * Adds a job to the list of things todo. Sanitizes the various options
  * to make sure we don't have conflicts, and initializes various
 	if (td == &def_thread)
 		return 0;

 	/*
 	 * if we are just dumping the output command line, don't add the job
 	 */
static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
- struct prof_io_ops *ops = &td->prof_io_ops;
+ if (td->flags & TD_F_PROFILE_OPS) {
+ struct prof_io_ops *ops = &td->prof_io_ops;
- if (ops->fill_io_u_off)
- return ops->fill_io_u_off(td, io_u);
+ if (ops->fill_io_u_off)
+ return ops->fill_io_u_off(td, io_u);
+ }
return __get_next_offset(td, io_u);
}
return __get_next_offset(td, io_u);
}
static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
- struct prof_io_ops *ops = &td->prof_io_ops;
+ if (td->flags & TD_F_PROFILE_OPS) {
+ struct prof_io_ops *ops = &td->prof_io_ops;
- if (ops->fill_io_u_size)
- return ops->fill_io_u_size(td, io_u);
+ if (ops->fill_io_u_size)
+ return ops->fill_io_u_size(td, io_u);
+ }
return __get_next_buflen(td, io_u);
}
return __get_next_buflen(td, io_u);
}
static struct fio_file *get_next_file(struct thread_data *td)
{
static struct fio_file *get_next_file(struct thread_data *td)
{
- struct prof_io_ops *ops = &td->prof_io_ops;
+ if (!(td->flags & TD_F_PROFILE_OPS)) {
+ struct prof_io_ops *ops = &td->prof_io_ops;
- if (ops->get_next_file)
- return ops->get_next_file(td);
+ if (ops->get_next_file)
+ return ops->get_next_file(td);
+ }
return __get_next_file(td);
}
return __get_next_file(td);
}
static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
- if (td->o.trim_backlog && td->trim_entries) {
+ if (!(td->flags & TD_F_TRIM_BACKLOG))
+ return 0;
+
+ if (td->trim_entries) {
int get_trim = 0;
if (td->trim_batch) {
int get_trim = 0;
if (td->trim_batch) {
static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
- if (td->o.verify_backlog && td->io_hist_len) {
+ if (!(td->flags & TD_F_VER_BACKLOG))
+ return 0;
+
+ if (td->io_hist_len) {
int get_verify = 0;
if (td->verify_batch)
int get_verify = 0;
if (td->verify_batch)
/*
* If using an iolog, grab next piece if any available.
*/
/*
* If using an iolog, grab next piece if any available.
*/
- if (td->o.read_iolog_file) {
+ if (td->flags & TD_F_READ_IOLOG) {
if (read_iolog_get(td, io_u))
goto err_put;
} else if (set_io_u_file(td, io_u)) {
if (read_iolog_get(td, io_u))
goto err_put;
} else if (set_io_u_file(td, io_u)) {
f->last_pos = io_u->offset + io_u->buflen;
if (io_u->ddir == DDIR_WRITE) {
f->last_pos = io_u->offset + io_u->buflen;
if (io_u->ddir == DDIR_WRITE) {
- if (td->o.refill_buffers) {
+ if (td->flags & TD_F_REFILL_BUFFERS) {
io_u_fill_buffer(td, io_u,
io_u->xfer_buflen, io_u->xfer_buflen);
io_u_fill_buffer(td, io_u,
io_u->xfer_buflen, io_u->xfer_buflen);
- } else if (td->o.scramble_buffers)
+ } else if (td->flags & TD_F_SCRAMBLE_BUFFERS)
- if (td->o.verify != VERIFY_NONE) {
+ if (td->flags & TD_F_VER_NONE) {
populate_verify_io_u(td, io_u);
do_scramble = 0;
}
populate_verify_io_u(td, io_u);
do_scramble = 0;
}
struct timeval start_time;
struct timeval issue_time;
struct timeval start_time;
struct timeval issue_time;
+ struct fio_file *file;
+ unsigned int flags;
+ enum fio_ddir ddir;
+
/*
* Allocated/set buffer and length
*/
/*
* Allocated/set buffer and length
*/
unsigned long buflen;
unsigned long long offset;
unsigned long buflen;
unsigned long long offset;
/*
* Initial seed for generating the buffer contents
/*
* Initial seed for generating the buffer contents
unsigned int resid;
unsigned int error;
unsigned int resid;
unsigned int error;
/*
* io engine private data
*/
/*
* io engine private data
*/
- unsigned int flags;
-
- struct fio_file *file;
-
struct flist_head list;
/*
struct flist_head list;
/*
td->prof_io_ops = *ops->io_ops;
td->prof_io_ops = *ops->io_ops;
+ td->flags |= TD_F_PROFILE_OPS;
+ }
}
int profile_td_init(struct thread_data *td)
}
int profile_td_init(struct thread_data *td)