unsigned long long max_bw[2], min_bw[2];
unsigned long long io_kb[2];
unsigned long long agg[2];
+ unsigned int kb_base;
};
/*
unsigned long long io_bytes[2];
unsigned long runtime[2];
unsigned long total_run_time;
+
+ /*
+ * IO Error related stats
+ */
+ unsigned continue_on_error;
+ unsigned long total_err_count;
+ int first_error;
+
+ unsigned int kb_base;
};
struct bssplit {
char *opendir;
char *ioengine;
enum td_ddir td_ddir;
+ unsigned int kb_base;
unsigned int ddir_nr;
unsigned int iodepth;
unsigned int iodepth_low;
unsigned int verify_pattern;
unsigned int verify_pattern_bytes;
unsigned int verify_fatal;
+ unsigned int verify_async;
unsigned int use_thread;
unsigned int unlink;
unsigned int do_disk_util;
unsigned int thinktime_spin;
unsigned int thinktime_blocks;
unsigned int fsync_blocks;
+ unsigned int fdatasync_blocks;
unsigned int start_delay;
unsigned long long timeout;
unsigned long long ramp_time;
unsigned long long zone_size;
unsigned long long zone_skip;
enum fio_memtype mem_type;
+ unsigned int mem_align;
unsigned int stonewall;
unsigned int new_group;
unsigned int numjobs;
os_cpu_mask_t cpumask;
unsigned int cpumask_set;
+ os_cpu_mask_t verify_cpumask;
+ unsigned int verify_cpumask_set;
unsigned int iolog;
unsigned int rwmixcycle;
unsigned int rwmix[2];
*/
unsigned int cpuload;
unsigned int cpucycle;
+
+ /*
+ * I/O Error handling
+ */
+ unsigned int continue_on_error;
};
#define FIO_VERROR_SIZE 128
struct flist_head io_u_freelist;
struct flist_head io_u_busylist;
struct flist_head io_u_requeues;
+ pthread_mutex_t io_u_lock;
+ pthread_cond_t free_cond;
+
+ /*
+ * async verify offload
+ */
+ struct flist_head verify_list;
+ pthread_t *verify_threads;
+ unsigned int nr_verify_threads;
+ pthread_cond_t verify_cond;
+ int verify_thread_exit;
/*
* Rate state
* For generating file sizes
*/
os_random_state_t file_size_state;
+
+ /*
+ * Error counts
+ */
+ unsigned int total_err_count;
+ int first_error;
};
/*
break; \
int e = (err); \
(td)->error = e; \
- snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
+ if (!(td)->first_error) \
+ snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
} while (0)
/*
 * Reset a previously recorded error on this thread.
 * Defined as a single parenthesized expression (no trailing ';') so
 * the macro behaves like an ordinary statement: the caller supplies
 * the semicolon, and use inside if/else chains cannot mis-parse.
 */
#define td_clear_error(td)	\
	((td)->error = 0)
/*
 * Record errno-style error 'err' on td, using strerror(err) as the
 * human-readable message. NOTE: 'err' is expanded twice (once for the
 * error code, once for strerror), so avoid side effects in the argument.
 */
#define td_verror(td, err, func) \
__td_verror((td), (err), strerror((err)), (func))
#define td_vmsg(td, err, msg, func) \
extern struct thread_data *threads;
-#define td_read(td) ((td)->o.td_ddir & TD_DDIR_READ)
-#define td_write(td) ((td)->o.td_ddir & TD_DDIR_WRITE)
-#define td_rw(td) (((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
-#define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
-#define file_randommap(td, f) (!(td)->o.norandommap && (f)->file_map)
-
static inline void fio_ro_check(struct thread_data *td, struct io_u *io_u)
{
assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)));
#define MAX_JOBS (1024)
/*
 * Errors that need not abort the job when continue_on_error is set:
 * plain I/O errors (EIO) and verify/bad-data errors (EILSEQ).
 */
#define td_non_fatal_error(e)	((e) == EIO || (e) == EILSEQ)
+
+static inline void update_error_count(struct thread_data *td, int err)
+{
+ td->total_err_count++;
+ if (td->total_err_count == 1)
+ td->first_error = err;
+}
+
static inline int should_fsync(struct thread_data *td)
{
if (td->last_was_sync)
extern unsigned long mtime_since_genesis(void);
extern void usec_spin(unsigned int);
extern void usec_sleep(struct thread_data *, unsigned long);
-extern long rate_throttle(struct thread_data *, unsigned long, unsigned long, enum fio_ddir);
extern void fill_start_time(struct timeval *);
extern void fio_gettime(struct timeval *, void *);
extern void fio_gtod_init(void);
*/
extern int __must_check parse_options(int, char **);
extern int fio_options_parse(struct thread_data *, char **, int);
+extern void fio_keywords_init(void);
extern int fio_cmd_option_parse(struct thread_data *, const char *, char *);
extern void fio_fill_default_options(struct thread_data *);
extern int fio_show_option_help(const char *);
return ret;
}
/*
 * Return non-zero iff 'val' is an exact power of two.
 * Zero is not considered a power of two.
 */
static inline int is_power_of_2(unsigned int val)
{
	if (!val)
		return 0;

	/* a power of two has exactly one bit set */
	return (val & (val - 1)) == 0;
}
+
+/*
+ * We currently only need to do locking if we have verifier threads
+ * accessing our internal structures too
+ */
+static inline void td_io_u_lock(struct thread_data *td)
+{
+ if (td->o.verify_async)
+ pthread_mutex_lock(&td->io_u_lock);
+}
+
+static inline void td_io_u_unlock(struct thread_data *td)
+{
+ if (td->o.verify_async)
+ pthread_mutex_unlock(&td->io_u_lock);
+}
+
+static inline void td_io_u_free_notify(struct thread_data *td)
+{
+ if (td->o.verify_async)
+ pthread_cond_signal(&td->free_cond);
+}
+
#endif