TD_DDIR_RANDRW = TD_DDIR_RW | TD_DDIR_RAND,
};
/*
 * How I/O to a file shared between jobs is serialized; stored per-job
 * in the options (file_lock_mode) and acted on via lock_file()/unlock_file().
 * Mode semantics per the fio "lockfile" option documentation — verify there.
 */
enum file_lock_mode {
	FILE_LOCK_NONE,		/* no locking */
	FILE_LOCK_EXCLUSIVE,	/* exclusive lock for any I/O */
	FILE_LOCK_READWRITE,	/* rw lock: writes exclusive, reads shared */
};
+
/*
 * Used for maintaining statistics
 */
FIO_FILE_EXTEND = 1 << 2, /* needs extend */
FIO_FILE_DONE = 1 << 3, /* io completed to this file */
FIO_SIZE_KNOWN = 1 << 4, /* size has been set */
+ FIO_FILE_HASHED = 1 << 5, /* file is on hash */
};
/*
* this structure holds state information for a single file.
*/
struct fio_file {
+ struct list_head hash_list;
enum fio_filetype filetype;
/*
unsigned long long last_pos;
+ /*
+ * if io is protected by a semaphore, this is set
+ */
+ struct fio_mutex *lock;
+ void *lock_owner;
+ unsigned int lock_batch;
+ enum fio_ddir lock_ddir;
+
/*
* block map for random io
*/
* IO depth and latency stats
*/
unsigned int io_u_map[FIO_IO_U_MAP_NR];
+ unsigned int io_u_submit[FIO_IO_U_MAP_NR];
+ unsigned int io_u_complete[FIO_IO_U_MAP_NR];
unsigned int io_u_lat_u[FIO_IO_U_LAT_U_NR];
unsigned int io_u_lat_m[FIO_IO_U_LAT_M_NR];
unsigned long total_io_u[2];
unsigned long short_io_u[2];
+ unsigned long total_submit;
+ unsigned long total_complete;
unsigned long long io_bytes[2];
unsigned long runtime[2];
unsigned int nr_files;
unsigned int open_files;
+ enum file_lock_mode file_lock_mode;
+ unsigned int lockfile_batch;
unsigned int odirect;
unsigned int invalidate_cache;
unsigned int write_lat_log;
unsigned int write_bw_log;
unsigned int norandommap;
+ unsigned int softrandommap;
unsigned int bs_unaligned;
unsigned int fsync_on_close;
unsigned int group_reporting;
unsigned int fadvise_hint;
unsigned int zero_buffers;
+ unsigned int refill_buffers;
unsigned int time_based;
char *read_iolog_file;
* Current IO depth and list of free and busy io_u's.
*/
unsigned int cur_depth;
+ unsigned int io_u_queued;
struct list_head io_u_freelist;
struct list_head io_u_busylist;
struct list_head io_u_requeues;
- unsigned int io_u_queued;
/*
* Rate state
* read/write mixed workload state
*/
os_random_state_t rwmix_state;
- unsigned long long rwmix_bytes;
- struct timeval rwmix_switch;
+ unsigned long rwmix_issues;
enum fio_ddir rwmix_ddir;
unsigned int ddir_nr;
extern unsigned long page_mask, page_size;
extern int read_only;
extern int eta_print;
+extern unsigned long done_secs;
extern char *job_section;
extern struct thread_data *threads;
#define td_write(td) ((td)->o.td_ddir & TD_DDIR_WRITE)
#define td_rw(td) (((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
#define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
+#define file_randommap(td, f) (!(td)->o.norandommap && (f)->file_map)
static inline void fio_ro_check(struct thread_data *td, struct io_u *io_u)
{
}
#define BLOCKS_PER_MAP (8 * sizeof(long))
-#define TO_MAP_BLOCK(td, f, b) (b)
-#define RAND_MAP_IDX(td, f, b) (TO_MAP_BLOCK(td, f, b) / BLOCKS_PER_MAP)
-#define RAND_MAP_BIT(td, f, b) (TO_MAP_BLOCK(td, f, b) & (BLOCKS_PER_MAP - 1))
/* block number -> block-map block number (currently a 1:1 mapping) */
#define TO_MAP_BLOCK(f, b)	(b)
/* index of the 'long' word in the map holding this block's bit */
#define RAND_MAP_IDX(f, b)	(TO_MAP_BLOCK(f, b) / BLOCKS_PER_MAP)
/* bit position of this block within that word */
#define RAND_MAP_BIT(f, b)	(TO_MAP_BLOCK(f, b) & (BLOCKS_PER_MAP - 1))
#define MAX_JOBS (1024)
FIO_LOG_ADD_FILE,
FIO_LOG_OPEN_FILE,
FIO_LOG_CLOSE_FILE,
+ FIO_LOG_UNLINK_FILE,
};
extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
extern int __must_check init_iolog(struct thread_data *td);
extern void log_io_piece(struct thread_data *, struct io_u *);
+extern void queue_io_piece(struct thread_data *, struct io_piece *);
extern void prune_io_piece_log(struct thread_data *);
extern void write_iolog_close(struct thread_data *);
extern int add_file(struct thread_data *, const char *);
extern void get_file(struct fio_file *);
extern int __must_check put_file(struct thread_data *, struct fio_file *);
+extern void lock_file(struct thread_data *, struct fio_file *, enum fio_ddir);
+extern void unlock_file(struct thread_data *, struct fio_file *);
+extern void unlock_file_all(struct thread_data *, struct fio_file *);
extern int add_dir_files(struct thread_data *, const char *);
extern int init_random_map(struct thread_data *);
extern void dup_files(struct thread_data *, struct thread_data *);
extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_init_timeout(void);
extern void io_u_set_timeout(struct thread_data *);
-extern void io_u_mark_depth(struct thread_data *, struct io_u *);
+extern void io_u_mark_depth(struct thread_data *, unsigned int);
+extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned int);
+void io_u_mark_complete(struct thread_data *, unsigned int);
+void io_u_mark_submit(struct thread_data *, unsigned int);
/*
* io engine entry points
#define for_each_td(td, i) \
for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
#define for_each_file(td, f, i) \
- for ((i) = 0, (f) = (td)->files[0]; (i) < (td)->o.nr_files; (i)++, (f)++)
+ if ((td)->files_index) \
+ for ((i) = 0, (f) = (td)->files[0]; \
+ (i) < (td)->o.nr_files && ((f) = (td)->files[i]) != NULL; \
+ (i)++)
#define fio_assert(td, cond) do { \
if (!(cond)) { \
} \
} while (0)
+static inline void fio_file_reset(struct fio_file *f)
+{
+ f->last_free_lookup = 0;
+ f->last_pos = f->file_offset;
+}
+
static inline void clear_error(struct thread_data *td)
{
td->error = 0;
struct fio_file *f = io_u->file;
dprint(FD_IO, "%s: io_u %p: off=%llu/len=%lu/ddir=%d", p, io_u,
- io_u->offset, io_u->buflen, io_u->ddir);
+ (unsigned long long) io_u->offset,
+ io_u->buflen, io_u->ddir);
if (fio_debug & (1 << FD_IO)) {
if (f)
log_info("/%s", f->file_name);