DDIR_SYNC,
};
+enum td_ddir {
+ TD_DDIR_READ = 1 << 0,
+ TD_DDIR_WRITE = 1 << 1,
+ TD_DDIR_RAND = 1 << 2,
+ TD_DDIR_RW = TD_DDIR_READ | TD_DDIR_WRITE,
+ TD_DDIR_RANDREAD = TD_DDIR_READ | TD_DDIR_RAND,
+ TD_DDIR_RANDWRITE = TD_DDIR_WRITE | TD_DDIR_RAND,
+ TD_DDIR_RANDRW = TD_DDIR_RW | TD_DDIR_RAND,
+};
+
/*
* Use for maintaining statistics
*/
#ifdef FIO_HAVE_SYSLET
struct syslet_req {
- struct syslet_uatom atom;
- long ret;
+ struct syslet_uatom atom; /* the atom to submit */
+ struct syslet_uatom *head; /* head of the sequence */
+ long ret; /* syscall return value */
};
#endif
struct fio_file *file;
struct list_head list;
+
+ /*
+ * Callback for io completion
+ */
+ int (*end_io)(struct io_u *);
};
/*
FIO_CPUIO = 1 << 1, /* cpu burner, doesn't do real io */
FIO_MMAPIO = 1 << 2, /* uses memory mapped io */
FIO_RAWIO = 1 << 3, /* some sort of direct/raw io */
- FIO_NETIO = 1 << 4, /* networked io */
- FIO_NULLIO = 1 << 5, /* no real data transfer (cpu/null) */
+ FIO_DISKLESSIO = 1 << 4, /* no disk involved */
+ FIO_SELFOPEN = 1 << 5, /* opens its own devices */
};
/*
unsigned int unlink;
};
+/*
+ * How many depth levels to log
+ */
+#define FIO_IO_U_MAP_NR 8
+#define FIO_IO_U_LAT_NR 12
+
struct thread_stat {
struct io_log *slat_log;
struct io_log *clat_log;
unsigned long usr_time;
unsigned long sys_time;
unsigned long ctx;
-};
-/*
- * How many depth levels to log
- */
-#define FIO_IO_U_MAP_NR 8
-#define FIO_IO_U_LAT_NR 12
+ /*
+ * IO depth and latency stats
+ */
+ unsigned int io_u_map[FIO_IO_U_MAP_NR];
+ unsigned int io_u_lat[FIO_IO_U_LAT_NR];
+ unsigned long total_io_u;
+};
/*
* This describes a single thread/process executing a fio job.
*/
struct thread_data {
+ int pad;
char *description;
char *name;
char *directory;
char *filename;
- char verror[80];
+ char verror[128];
pthread_t thread;
int thread_number;
int groupid;
- struct thread_stat ts;
+ struct thread_stat *ts;
+ struct thread_stat __ts;
enum fio_filetype filetype;
struct fio_file *files;
unsigned int nr_files;
+ unsigned int nr_open_files;
unsigned int nr_uniq_files;
- unsigned int next_file;
+ union {
+ unsigned int next_file;
+ os_random_state_t next_file_state;
+ };
int error;
pid_t pid;
char *orig_buffer;
size_t orig_buffer_size;
volatile int terminate;
volatile int runstate;
- enum fio_ddir ddir;
- unsigned int iomix;
+ enum td_ddir td_ddir;
unsigned int ioprio;
unsigned int last_was_sync;
- unsigned int sequential;
unsigned int odirect;
unsigned int invalidate_cache;
unsigned int create_serialize;
unsigned int numjobs;
unsigned int iodepth;
unsigned int iodepth_low;
+ unsigned int iodepth_batch;
os_cpu_mask_t cpumask;
unsigned int iolog;
unsigned int read_iolog;
unsigned int rwmixread;
unsigned int rwmixwrite;
unsigned int nice;
+ unsigned int file_service_type;
+ unsigned int group_reporting;
char *read_iolog_file;
char *write_iolog_file;
* Current IO depth and list of free and busy io_u's.
*/
unsigned int cur_depth;
- unsigned int io_u_map[FIO_IO_U_MAP_NR];
- unsigned int io_u_lat[FIO_IO_U_LAT_NR];
- unsigned long total_io_u;
struct list_head io_u_freelist;
struct list_head io_u_busylist;
struct list_head io_u_requeues;
+ unsigned int io_u_queued;
/*
* Rate state
struct itimerval timer;
};
+/*
+ * File service type: round-robin through the available files, or pick
+ * one at random.
+ */
+enum {
+ FIO_FSERVICE_RANDOM = 1,
+ FIO_FSERVICE_RR = 2,
+};
+
/*
* 30 second per-io_u timeout, with 5 second intervals to avoid resetting
* the timer on each queue operation.
#define IO_U_TIMEOUT_INC 5
#define IO_U_TIMEOUT 30
-#define __td_verror(td, err, msg) \
+#define __td_verror(td, err, msg, func) \
do { \
if ((td)->error) \
break; \
int e = (err); \
(td)->error = e; \
- snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, error=%s", __FILE__, __LINE__, (msg)); \
+ snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
} while (0)
-#define td_verror(td, err) __td_verror((td), (err), strerror((err)))
-#define td_vmsg(td, err, msg) __td_verror((td), (err), (msg))
+#define td_verror(td, err, func) \
+ __td_verror((td), (err), strerror((err)), (func))
+#define td_vmsg(td, err, msg, func) \
+ __td_verror((td), (err), (msg), (func))
extern int exitall_on_terminate;
extern int thread_number;
extern struct thread_data *threads;
-#define td_read(td) ((td)->ddir == DDIR_READ)
-#define td_write(td) ((td)->ddir == DDIR_WRITE)
-#define td_rw(td) ((td)->iomix != 0)
+#define td_read(td) ((td)->td_ddir & TD_DDIR_READ)
+#define td_write(td) ((td)->td_ddir & TD_DDIR_WRITE)
+#define td_rw(td) (((td)->td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
+#define td_random(td) ((td)->td_ddir & TD_DDIR_RAND)
#define BLOCKS_PER_MAP (8 * sizeof(long))
#define TO_MAP_BLOCK(td, f, b) ((b) - ((f)->file_offset / (td)->rw_min_bs))
struct timeval time;
};
-/*
- * Callback for io completion
- */
-typedef int (endio_handler)(struct io_u *);
-
#define DISK_UTIL_MSEC (250)
#ifndef min
extern unsigned long mtime_since_genesis(void);
extern void __usec_sleep(unsigned int);
extern void usec_sleep(struct thread_data *, unsigned long);
-extern void rate_throttle(struct thread_data *, unsigned long, unsigned int, int);
+extern void rate_throttle(struct thread_data *, unsigned long, unsigned int);
extern void fill_start_time(struct timeval *);
extern void fio_gettime(struct timeval *, void *);
+extern void set_genesis_time(void);
/*
* Init functions
extern void close_files(struct thread_data *);
extern int __must_check setup_files(struct thread_data *);
extern int __must_check open_files(struct thread_data *);
+extern int open_file(struct thread_data *, struct fio_file *, int, int);
+extern void close_file(struct thread_data *, struct fio_file *);
extern int __must_check file_invalidate_cache(struct thread_data *, struct fio_file *);
/*
extern struct io_u *get_io_u(struct thread_data *);
extern void put_io_u(struct thread_data *, struct io_u *);
extern void requeue_io_u(struct thread_data *, struct io_u **);
-extern long __must_check io_u_sync_complete(struct thread_data *, struct io_u *, endio_handler *);
-extern long __must_check io_u_queued_complete(struct thread_data *, int, endio_handler *);
+extern long __must_check io_u_sync_complete(struct thread_data *, struct io_u *);
+extern long __must_check io_u_queued_complete(struct thread_data *, int);
extern void io_u_queued(struct thread_data *, struct io_u *);
+extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_init_timeout(void);
extern void io_u_set_timeout(struct thread_data *);
fprintf(stderr, ##args); \
} while (0)
+FILE *get_f_out(void);
+FILE *get_f_err(void);
+
struct ioengine_ops {
struct list_head list;
char name[16];