unsigned int write_lat_log;
unsigned int write_bw_log;
unsigned int norandommap;
+ unsigned int softrandommap;
unsigned int bs_unaligned;
unsigned int fsync_on_close;
/*
 * Current IO depth and list of free and busy io_u's.
 */
unsigned int cur_depth;
+ unsigned int io_u_queued;
struct list_head io_u_freelist;
struct list_head io_u_busylist;
struct list_head io_u_requeues;
- unsigned int io_u_queued;
/*
 * Rate state / read-write mixed workload state
 */
os_random_state_t rwmix_state;
- unsigned long long rwmix_bytes;
- struct timeval rwmix_switch;
+ unsigned long rwmix_issues;
enum fio_ddir rwmix_ddir;
unsigned int ddir_nr;
extern unsigned long page_mask, page_size;
extern int read_only;
extern int eta_print;
+extern unsigned long done_secs;
extern char *job_section;
extern struct thread_data *threads;
/* Job issues writes: WRITE bit set in the data-direction flags. */
#define td_write(td) ((td)->o.td_ddir & TD_DDIR_WRITE)
/* Mixed read/write job: BOTH bits of TD_DDIR_RW must be set, hence the
 * mask-and-compare rather than a plain bitwise test. */
#define td_rw(td) (((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
/* Job uses random (as opposed to sequential) access. */
#define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
+#define file_randommap(td, f) (!(td)->o.norandommap && (f)->file_map)
/*
 * Read-only sanity hook for an io_u about to be issued. The body is
 * empty in this view — presumably the real check (rejecting writes when
 * the global 'read_only' flag is set) lives in the full source or is
 * conditionally compiled; TODO(review): confirm against the complete file.
 */
static inline void fio_ro_check(struct thread_data *td, struct io_u *io_u)
{
}
#define BLOCKS_PER_MAP (8 * sizeof(long))
-#define TO_MAP_BLOCK(td, f, b) (b)
-#define RAND_MAP_IDX(td, f, b) (TO_MAP_BLOCK(td, f, b) / BLOCKS_PER_MAP)
-#define RAND_MAP_BIT(td, f, b) (TO_MAP_BLOCK(td, f, b) & (BLOCKS_PER_MAP - 1))
+#define TO_MAP_BLOCK(f, b) (b)
+#define RAND_MAP_IDX(f, b) (TO_MAP_BLOCK(f, b) / BLOCKS_PER_MAP)
+#define RAND_MAP_BIT(f, b) (TO_MAP_BLOCK(f, b) & (BLOCKS_PER_MAP - 1))
#define MAX_JOBS (1024)
FIO_LOG_ADD_FILE,
FIO_LOG_OPEN_FILE,
FIO_LOG_CLOSE_FILE,
+ FIO_LOG_UNLINK_FILE,
};
extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
extern int __must_check init_iolog(struct thread_data *td);
extern void log_io_piece(struct thread_data *, struct io_u *);
+extern void queue_io_piece(struct thread_data *, struct io_piece *);
extern void prune_io_piece_log(struct thread_data *);
extern void write_iolog_close(struct thread_data *);
extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_init_timeout(void);
extern void io_u_set_timeout(struct thread_data *);
-extern void io_u_mark_depth(struct thread_data *, struct io_u *);
+extern void io_u_mark_depth(struct thread_data *, unsigned int);
/*
 * io engine entry points
 */
/* Iterate (td) over every slot of the global 'threads' array, with (i)
 * as the running index; loops over all 'thread_number' entries. Both
 * arguments are lvalues that the macro assigns. */
#define for_each_td(td, i) \
for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
#define for_each_file(td, f, i) \
- for ((i) = 0, (f) = (td)->files[0]; \
- (i) < (td)->o.nr_files && ((f) = (td)->files[i]) != NULL; \
- (i)++)
+ if ((td)->files_index) \
+ for ((i) = 0, (f) = (td)->files[0]; \
+ (i) < (td)->o.nr_files && ((f) = (td)->files[i]) != NULL; \
+ (i)++)
#define fio_assert(td, cond) do { \
if (!(cond)) { \