* IO depth and latency stats
*/
unsigned int io_u_map[FIO_IO_U_MAP_NR];
+ unsigned int io_u_submit[FIO_IO_U_MAP_NR];
+ unsigned int io_u_complete[FIO_IO_U_MAP_NR];
unsigned int io_u_lat_u[FIO_IO_U_LAT_U_NR];
unsigned int io_u_lat_m[FIO_IO_U_LAT_M_NR];
unsigned long total_io_u[2];
unsigned long short_io_u[2];
+ unsigned long total_submit;
+ unsigned long total_complete;
unsigned long long io_bytes[2];
unsigned long runtime[2];
unsigned int write_lat_log;
unsigned int write_bw_log;
unsigned int norandommap;
+ unsigned int softrandommap;
unsigned int bs_unaligned;
unsigned int fsync_on_close;
unsigned int group_reporting;
unsigned int fadvise_hint;
unsigned int zero_buffers;
+ unsigned int refill_buffers;
unsigned int time_based;
char *read_iolog_file;
* Current IO depth and list of free and busy io_u's.
*/
unsigned int cur_depth;
+ unsigned int io_u_queued;
struct list_head io_u_freelist;
struct list_head io_u_busylist;
struct list_head io_u_requeues;
- unsigned int io_u_queued;
/*
* Rate state
* read/write mixed workload state
*/
os_random_state_t rwmix_state;
- unsigned long long rwmix_bytes;
- struct timeval rwmix_switch;
+ unsigned long rwmix_issues;
enum fio_ddir rwmix_ddir;
unsigned int ddir_nr;
extern unsigned long page_mask, page_size;
extern int read_only;
extern int eta_print;
+extern unsigned long done_secs;
extern char *job_section;
extern struct thread_data *threads;
#define td_write(td) ((td)->o.td_ddir & TD_DDIR_WRITE)
#define td_rw(td) (((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
#define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
+#define file_randommap(td, f) (!(td)->o.norandommap && (f)->file_map)
/*
 * NOTE(review): this chunk is a unified diff with hunk headers stripped,
 * so the body below may have interior lines elided between hunks.
 * Presumably this guard checks that no write I/O is issued when the
 * global read-only mode is set (see the `read_only` extern above) —
 * TODO confirm against the full source before assuming it is a no-op.
 */
static inline void fio_ro_check(struct thread_data *td, struct io_u *io_u)
{
}
#define BLOCKS_PER_MAP (8 * sizeof(long))
-#define TO_MAP_BLOCK(td, f, b) (b)
-#define RAND_MAP_IDX(td, f, b) (TO_MAP_BLOCK(td, f, b) / BLOCKS_PER_MAP)
-#define RAND_MAP_BIT(td, f, b) (TO_MAP_BLOCK(td, f, b) & (BLOCKS_PER_MAP - 1))
+#define TO_MAP_BLOCK(f, b) (b)
+#define RAND_MAP_IDX(f, b) (TO_MAP_BLOCK(f, b) / BLOCKS_PER_MAP)
+#define RAND_MAP_BIT(f, b) (TO_MAP_BLOCK(f, b) & (BLOCKS_PER_MAP - 1))
#define MAX_JOBS (1024)
extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_init_timeout(void);
extern void io_u_set_timeout(struct thread_data *);
-extern void io_u_mark_depth(struct thread_data *, struct io_u *);
+extern void io_u_mark_depth(struct thread_data *, unsigned int);
+extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned int);
+void io_u_mark_complete(struct thread_data *, unsigned int);
+void io_u_mark_submit(struct thread_data *, unsigned int);
/*
* io engine entry points
} \
} while (0)
+/*
+ * Reset a file's per-run position state: rewind the free-block lookup
+ * cursor to the start of the random map and return the next-IO position
+ * to the file's configured starting offset.
+ */
+static inline void fio_file_reset(struct fio_file *f)
+{
+	f->last_free_lookup = 0;
+	f->last_pos = f->file_offset;
+}
+
static inline void clear_error(struct thread_data *td)
{
td->error = 0;
struct fio_file *f = io_u->file;
dprint(FD_IO, "%s: io_u %p: off=%llu/len=%lu/ddir=%d", p, io_u,
- io_u->offset, io_u->buflen, io_u->ddir);
+ (unsigned long long) io_u->offset,
+ io_u->buflen, io_u->ddir);
if (fio_debug & (1 << FD_IO)) {
if (f)
log_info("/%s", f->file_name);