};
#endif
+enum {
+ IO_U_F_FREE = 1 << 0, /* io_u sits on the free list, not owned by an engine */
+ IO_U_F_FLIGHT = 1 << 1, /* io_u has been queued and is awaiting completion */
+};
+
/*
* The io unit
*/
unsigned int seen;
};
+ unsigned int flags;
+
struct fio_file *file;
struct list_head list;
enum {
FIO_Q_COMPLETED = 0, /* completed sync */
FIO_Q_QUEUED = 1, /* queued, will complete async */
+ FIO_Q_BUSY = 2, /* no more room, call ->commit() */
};
#define FIO_HDR_MAGIC 0xf00baaef
unsigned int unlink;
};
+struct thread_stat { /* per-thread I/O stats, hoisted out of the thread struct as a unit */
+ struct io_log *slat_log; /* submission latency sample log (optional) */
+ struct io_log *clat_log; /* completion latency sample log (optional) */
+ struct io_log *bw_log; /* bandwidth sample log (optional) */
+
+ /*
+ * bandwidth and latency stats
+ */
+ struct io_stat clat_stat[2]; /* completion latency */
+ struct io_stat slat_stat[2]; /* submission latency */
+ struct io_stat bw_stat[2]; /* bandwidth stats */
+
+ unsigned long long stat_io_bytes[2]; /* bytes at last bw sample; [2] presumably read/write — confirm against users */
+ struct timeval stat_sample_time[2]; /* timestamp of last bw sample, per direction */
+
+ /*
+ * fio system usage accounting
+ */
+ struct rusage ru_start; /* rusage snapshot at job start */
+ struct rusage ru_end; /* rusage snapshot at job end */
+ unsigned long usr_time; /* derived user CPU time */
+ unsigned long sys_time; /* derived system CPU time */
+ unsigned long ctx; /* context switch count */
+};
+
/*
* How many depth levels to log
*/
pthread_t thread;
int thread_number;
int groupid;
+ struct thread_stat ts;
enum fio_filetype filetype;
struct fio_file *files;
unsigned int nr_files;
unsigned int stonewall;
unsigned int numjobs;
unsigned int iodepth;
+ unsigned int iodepth_low;
os_cpu_mask_t cpumask;
unsigned int iolog;
unsigned int read_iolog;
unsigned long total_io_u;
struct list_head io_u_freelist;
struct list_head io_u_busylist;
+ struct list_head io_u_requeues;
/*
* Rate state
unsigned long long start_offset;
unsigned long long total_io_size;
+ unsigned long io_issues[2];
unsigned long long io_blocks[2];
unsigned long long io_bytes[2];
- unsigned long long zone_bytes;
unsigned long long this_io_bytes[2];
+ unsigned long long zone_bytes;
volatile int mutex;
/*
unsigned int cpuload;
unsigned int cpucycle;
- /*
- * bandwidth and latency stats
- */
- struct io_stat clat_stat[2]; /* completion latency */
- struct io_stat slat_stat[2]; /* submission latency */
- struct io_stat bw_stat[2]; /* bandwidth stats */
-
- unsigned long long stat_io_bytes[2];
- struct timeval stat_sample_time[2];
-
- struct io_log *slat_log;
- struct io_log *clat_log;
- struct io_log *bw_log;
-
struct timeval start; /* start of this loop */
struct timeval epoch; /* time job was started */
struct timeval end_time;/* time job ended */
- /*
- * fio system usage accounting
- */
- struct rusage ru_start;
- struct rusage ru_end;
- unsigned long usr_time;
- unsigned long sys_time;
- unsigned long ctx;
-
/*
* read/write mixed workload state
*/
*/
struct list_head io_hist_list;
struct list_head io_log_list;
+
+ /*
+ * timeout handling
+ */
+ struct timeval timeout_end;
+ struct itimerval timer;
};
+/*
+ * 30 second per-io_u timeout, with 5 second intervals to avoid resetting
+ * the timer on each queue operation.
+ */
+#define IO_U_TIMEOUT_INC 5 /* seconds between timer re-arms */
+#define IO_U_TIMEOUT 30 /* seconds before an in-flight io_u is considered hung */
+
#define __td_verror(td, err, msg) \
do { \
+ if ((td)->error) \
+ break; \
int e = (err); \
(td)->error = e; \
snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, error=%s", __FILE__, __LINE__, (msg)); \
};
/*
- * Used for passing io_u completion data
+ * Callback for io completion
*/
-struct io_completion_data {
- int nr; /* input */
-
- int error; /* output */
- unsigned long bytes_done[2]; /* output */
- struct timeval time; /* output */
-};
+typedef int (endio_handler)(struct io_u *);
#define DISK_UTIL_MSEC (250)
/*
* Log exports
*/
-extern int read_iolog_get(struct thread_data *, struct io_u *);
+extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
extern void write_iolog_put(struct thread_data *, struct io_u *);
-extern int init_iolog(struct thread_data *td);
+extern int __must_check init_iolog(struct thread_data *td);
extern void log_io_piece(struct thread_data *, struct io_u *);
extern void prune_io_piece_log(struct thread_data *);
extern void write_iolog_close(struct thread_data *);
/*
* Init functions
*/
-extern int parse_options(int, char **);
-extern int init_random_state(struct thread_data *);
+extern int __must_check parse_options(int, char **);
+extern int __must_check init_random_state(struct thread_data *);
/*
* File setup/shutdown
*/
extern void close_files(struct thread_data *);
-extern int setup_files(struct thread_data *);
-extern int open_files(struct thread_data *);
-extern int file_invalidate_cache(struct thread_data *, struct fio_file *);
+extern int __must_check setup_files(struct thread_data *);
+extern int __must_check open_files(struct thread_data *);
+extern int __must_check file_invalidate_cache(struct thread_data *, struct fio_file *);
/*
* ETA/status stuff
* Verify helpers
*/
extern void populate_verify_io_u(struct thread_data *, struct io_u *);
-extern int get_next_verify(struct thread_data *td, struct io_u *);
-extern int do_io_u_verify(struct thread_data *, struct io_u **);
+extern int __must_check get_next_verify(struct thread_data *td, struct io_u *);
+extern int __must_check verify_io_u(struct io_u *);
/*
* Memory helpers
*/
-extern int fio_pin_memory(void);
+extern int __must_check fio_pin_memory(void);
extern void fio_unpin_memory(void);
-extern int allocate_io_mem(struct thread_data *);
+extern int __must_check allocate_io_mem(struct thread_data *);
extern void free_io_mem(struct thread_data *);
/*
*/
#define queue_full(td) list_empty(&(td)->io_u_freelist)
extern struct io_u *__get_io_u(struct thread_data *);
-extern struct io_u *get_io_u(struct thread_data *, struct fio_file *);
+extern struct io_u *get_io_u(struct thread_data *);
extern void put_io_u(struct thread_data *, struct io_u *);
-extern void ios_completed(struct thread_data *, struct io_completion_data *);
-extern void io_completed(struct thread_data *, struct io_u *, struct io_completion_data *);
-extern void init_icd(struct io_completion_data *);
+extern void requeue_io_u(struct thread_data *, struct io_u **);
+extern long __must_check io_u_sync_complete(struct thread_data *, struct io_u *, endio_handler *);
+extern long __must_check io_u_queued_complete(struct thread_data *, int, endio_handler *);
+extern void io_u_queued(struct thread_data *, struct io_u *);
+extern void io_u_init_timeout(void);
+extern void io_u_set_timeout(struct thread_data *);
/*
* io engine entry points
*/
-extern int td_io_init(struct thread_data *);
-extern int td_io_prep(struct thread_data *, struct io_u *);
-extern int td_io_queue(struct thread_data *, struct io_u *);
-extern int td_io_sync(struct thread_data *, struct fio_file *);
-extern int td_io_getevents(struct thread_data *, int, int, struct timespec *);
+extern int __must_check td_io_init(struct thread_data *);
+extern int __must_check td_io_prep(struct thread_data *, struct io_u *);
+extern int __must_check td_io_queue(struct thread_data *, struct io_u *);
+extern int __must_check td_io_sync(struct thread_data *, struct fio_file *);
+extern int __must_check td_io_getevents(struct thread_data *, int, int, struct timespec *);
+extern int __must_check td_io_commit(struct thread_data *);
/*
* This is a pretty crappy semaphore implementation, but with the use that fio
int (*init)(struct thread_data *);
int (*prep)(struct thread_data *, struct io_u *);
int (*queue)(struct thread_data *, struct io_u *);
+ int (*commit)(struct thread_data *);
int (*getevents)(struct thread_data *, int, int, struct timespec *);
struct io_u *(*event)(struct thread_data *, int);
int (*cancel)(struct thread_data *, struct io_u *);
unsigned long priv;
};
-#define FIO_IOOPS_VERSION 4
+#define FIO_IOOPS_VERSION 5
extern struct ioengine_ops *load_ioengine(struct thread_data *, const char *);
-extern int register_ioengine(struct ioengine_ops *);
+extern void register_ioengine(struct ioengine_ops *);
extern void unregister_ioengine(struct ioengine_ops *);
extern void close_ioengine(struct thread_data *);