#include "file.h"
#include "io_ddir.h"
#include "ioengine.h"
+#include "iolog.h"
#ifdef FIO_HAVE_GUASI
#include <guasi.h>
#include <sys/asynch.h>
#endif
-/*
- * Use for maintaining statistics
- */
-struct io_stat {
- unsigned long max_val;
- unsigned long min_val;
- unsigned long samples;
-
- double mean;
- double S;
-};
-
-/*
- * A single data sample
- */
-struct io_sample {
- unsigned long time;
- unsigned long val;
- enum fio_ddir ddir;
- unsigned int bs;
-};
-
-/*
- * Dynamically growing data sample log
- */
-struct io_log {
- unsigned long nr_samples;
- unsigned long max_samples;
- struct io_sample *log;
-};
-
-/*
- * When logging io actions, this matches a single sent io_u
- */
-struct io_piece {
- union {
- struct rb_node rb_node;
- struct flist_head list;
- };
- union {
- int fileno;
- struct fio_file *file;
- };
- unsigned long long offset;
- unsigned long len;
- enum fio_ddir ddir;
- union {
- unsigned long delay;
- unsigned int file_action;
- };
-};
-
struct group_run_stats {
unsigned long long max_run[2], min_run[2];
unsigned long long max_bw[2], min_bw[2];
unsigned long long io_kb[2];
unsigned long long agg[2];
+ unsigned int kb_base;
};
/*
unsigned long long io_bytes[2];
unsigned long runtime[2];
unsigned long total_run_time;
+
+ /*
+ * IO Error related stats
+ */
+ unsigned continue_on_error;
+ unsigned long total_err_count;
+ int first_error;
+
+ unsigned int kb_base;
};
struct bssplit {
char *opendir;
char *ioengine;
enum td_ddir td_ddir;
+ unsigned int kb_base;
unsigned int ddir_nr;
unsigned int iodepth;
unsigned int iodepth_low;
unsigned int verify_pattern;
unsigned int verify_pattern_bytes;
unsigned int verify_fatal;
+ unsigned int verify_async;
unsigned int use_thread;
unsigned int unlink;
unsigned int do_disk_util;
unsigned int thinktime_spin;
unsigned int thinktime_blocks;
unsigned int fsync_blocks;
+ unsigned int fdatasync_blocks;
unsigned int start_delay;
unsigned long long timeout;
unsigned long long ramp_time;
unsigned long long zone_size;
unsigned long long zone_skip;
enum fio_memtype mem_type;
+ unsigned int mem_align;
unsigned int stonewall;
unsigned int new_group;
unsigned int numjobs;
os_cpu_mask_t cpumask;
unsigned int cpumask_set;
+ os_cpu_mask_t verify_cpumask;
+ unsigned int verify_cpumask_set;
unsigned int iolog;
unsigned int rwmixcycle;
unsigned int rwmix[2];
char *exec_prerun;
char *exec_postrun;
- unsigned int rate;
- unsigned int ratemin;
+ unsigned int rate[2];
+ unsigned int ratemin[2];
unsigned int ratecycle;
- unsigned int rate_iops;
- unsigned int rate_iops_min;
+ unsigned int rate_iops[2];
+ unsigned int rate_iops_min[2];
char *ioscheduler;
*/
unsigned int cpuload;
unsigned int cpucycle;
+
+ /*
+ * I/O Error handling
+ */
+ unsigned int continue_on_error;
};
#define FIO_VERROR_SIZE 128
struct flist_head io_u_freelist;
struct flist_head io_u_busylist;
struct flist_head io_u_requeues;
+ pthread_mutex_t io_u_lock;
+ pthread_cond_t free_cond;
+
+ /*
+ * async verify offload
+ */
+ struct flist_head verify_list;
+ pthread_t *verify_threads;
+ unsigned int nr_verify_threads;
+ pthread_cond_t verify_cond;
+ int verify_thread_exit;
/*
* Rate state
*/
- unsigned long rate_usec_cycle;
- long rate_pending_usleep;
- unsigned long rate_bytes;
- unsigned long rate_blocks;
- struct timeval lastrate;
+ unsigned long rate_usec_cycle[2];
+ long rate_pending_usleep[2];
+ unsigned long rate_bytes[2];
+ unsigned long rate_blocks[2];
+ struct timeval lastrate[2];
unsigned long long total_io_size;
struct timeval start; /* start of this loop */
struct timeval epoch; /* time job was started */
- struct timeval rw_end[2];
struct timeval last_issue;
struct timeval tv_cache;
unsigned int tv_cache_nr;
unsigned int tv_cache_mask;
- unsigned int rw_end_set[2];
unsigned int ramp_time_over;
/*
* For generating file sizes
*/
os_random_state_t file_size_state;
-};
-/*
- * roundrobin available files, or choose one at random, or do each one
- * serially.
- */
-enum {
- FIO_FSERVICE_RANDOM = 1,
- FIO_FSERVICE_RR = 2,
- FIO_FSERVICE_SEQ = 3,
+ /*
+ * Error counts
+ */
+ unsigned int total_err_count;
+ int first_error;
};
/*
break; \
int e = (err); \
(td)->error = e; \
- snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
+ if (!(td)->first_error) \
+ snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
} while (0)
+#define td_clear_error(td)	\
+	do { (td)->error = 0; } while (0)
#define td_verror(td, err, func) \
__td_verror((td), (err), strerror((err)), (func))
#define td_vmsg(td, err, msg, func) \
extern struct thread_data *threads;
-#define td_read(td) ((td)->o.td_ddir & TD_DDIR_READ)
-#define td_write(td) ((td)->o.td_ddir & TD_DDIR_WRITE)
-#define td_rw(td) (((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
-#define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
-#define file_randommap(td, f) (!(td)->o.norandommap && (f)->file_map)
-
static inline void fio_ro_check(struct thread_data *td, struct io_u *io_u)
{
assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)));
#define MAX_JOBS (1024)
+#define td_non_fatal_error(e) ((e) == EIO || (e) == EILSEQ)
+
+static inline void update_error_count(struct thread_data *td, int err)
+{
+ td->total_err_count++;
+ if (td->total_err_count == 1)
+ td->first_error = err;
+}
+
static inline int should_fsync(struct thread_data *td)
{
if (td->last_was_sync)
return 0;
}
-/*
- * Disk utils as read in /sys/block/<dev>/stat
- */
-struct disk_util_stat {
- unsigned ios[2];
- unsigned merges[2];
- unsigned long long sectors[2];
- unsigned ticks[2];
- unsigned io_ticks;
- unsigned time_in_queue;
-};
-
-/*
- * Per-device disk util management
- */
-struct disk_util {
- struct flist_head list;
- /* If this disk is a slave, hook it into the master's
- * list using this head.
- */
- struct flist_head slavelist;
-
- char *name;
- char *sysfs_root;
- char path[256];
- int major, minor;
-
- struct disk_util_stat dus;
- struct disk_util_stat last_dus;
-
- /* For software raids, this entry maintains pointers to the
- * entries for the slave devices. The disk_util entries for
- * the slaves devices should primarily be maintained through
- * the disk_list list, i.e. for memory allocation and
- * de-allocation, etc. Whereas this list should be used only
- * for aggregating a software RAID's disk util figures.
- */
- struct flist_head slaves;
-
- unsigned long msec;
- struct timeval time;
-
- struct fio_mutex *lock;
- unsigned long users;
-};
-
-static inline void disk_util_inc(struct disk_util *du)
-{
- if (du) {
- fio_mutex_down(du->lock);
- du->users++;
- fio_mutex_up(du->lock);
- }
-}
-
-static inline void disk_util_dec(struct disk_util *du)
-{
- if (du) {
- fio_mutex_down(du->lock);
- du->users--;
- fio_mutex_up(du->lock);
- }
-}
-
-#define DISK_UTIL_MSEC (250)
-
-/*
- * Log exports
- */
-enum file_log_act {
- FIO_LOG_ADD_FILE,
- FIO_LOG_OPEN_FILE,
- FIO_LOG_CLOSE_FILE,
- FIO_LOG_UNLINK_FILE,
-};
-
-extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
-extern void log_io_u(struct thread_data *, struct io_u *);
-extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
-extern int __must_check init_iolog(struct thread_data *td);
-extern void log_io_piece(struct thread_data *, struct io_u *);
-extern void queue_io_piece(struct thread_data *, struct io_piece *);
-extern void prune_io_piece_log(struct thread_data *);
-extern void write_iolog_close(struct thread_data *);
-
-/*
- * Logging
- */
-extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long,
- unsigned int);
-extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long,
- unsigned int);
-extern void add_bw_sample(struct thread_data *, enum fio_ddir, unsigned int,
- struct timeval *);
-extern void show_run_stats(void);
-extern void init_disk_util(struct thread_data *);
-extern void update_rusage_stat(struct thread_data *);
-extern void update_io_ticks(void);
-extern void setup_log(struct io_log **);
-extern void finish_log(struct thread_data *, struct io_log *, const char *);
-extern void finish_log_named(struct thread_data *, struct io_log *, const char *, const char *);
-extern void __finish_log(struct io_log *, const char *);
-extern struct io_log *agg_io_log[2];
-extern int write_bw_log;
-extern void add_agg_sample(unsigned long, enum fio_ddir, unsigned int);
-
/*
* Time functions
*/
extern unsigned long mtime_since_genesis(void);
extern void usec_spin(unsigned int);
extern void usec_sleep(struct thread_data *, unsigned long);
-extern void rate_throttle(struct thread_data *, unsigned long, unsigned int);
extern void fill_start_time(struct timeval *);
extern void fio_gettime(struct timeval *, void *);
extern void fio_gtod_init(void);
*/
extern int __must_check parse_options(int, char **);
extern int fio_options_parse(struct thread_data *, char **, int);
+extern void fio_keywords_init(void);
extern int fio_cmd_option_parse(struct thread_data *, const char *, char *);
extern void fio_fill_default_options(struct thread_data *);
extern int fio_show_option_help(const char *);
extern void print_thread_status(void);
extern void print_status_init(int);
-/*
- * disk util stuff
- */
-#ifdef FIO_HAVE_DISK_UTIL
-extern void show_disk_util(void);
-extern void init_disk_util(struct thread_data *);
-extern void update_io_ticks(void);
-#else
-#define show_disk_util()
-#define init_disk_util(td)
-#define update_io_ticks()
-#endif
-
/*
* Thread life cycle. Once a thread has a runstate beyond TD_INITIALIZED, it
 * will never go back again. It may cycle between running/verifying/fsyncing.
} \
} while (0)
-static inline void fio_file_reset(struct fio_file *f)
+static inline int fio_fill_issue_time(struct thread_data *td)
+{
+ if (td->o.read_iolog_file ||
+ !td->o.disable_clat || !td->o.disable_slat || !td->o.disable_bw)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Cheesy number->string conversion, complete with carry rounding error.
+ */
+static inline char *num2str(unsigned long num, int maxlen, int base, int pow2)
{
- f->last_free_lookup = 0;
- f->last_pos = f->file_offset;
- if (f->file_map)
- memset(f->file_map, 0, f->num_maps * sizeof(int));
+	char postfix[] = { ' ', 'K', 'M', 'G', 'T', 'P', 'E' };
+	unsigned int thousand;
+	char *buf;
+	int i;
+
+	if (pow2)
+		thousand = 1024;
+	else
+		thousand = 1000;
+
+	buf = malloc(128);
+
+	for (i = 0; base > 1; i++)
+		base /= thousand;
+
+	do {
+		int len, carry = 0;
+
+		len = sprintf(buf, "%'lu", num);
+		if (len <= maxlen) {
+			if (i >= 1) {
+				buf[len] = postfix[i];
+				buf[len + 1] = '\0';
+			}
+			return buf;
+		}
+
+		if ((num % thousand) >= (thousand / 2))
+			carry = 1;
+
+		num /= thousand;
+		num += carry;
+		i++;
+	} while (i <= 6);
+
+ return buf;
}
-static inline void clear_error(struct thread_data *td)
+static inline int __should_check_rate(struct thread_data *td,
+ enum fio_ddir ddir)
{
- td->error = 0;
- td->verror[0] = '\0';
+ struct thread_options *o = &td->o;
+
+ /*
+ * If some rate setting was given, we need to check it
+ */
+ if (o->rate[ddir] || o->ratemin[ddir] || o->rate_iops[ddir] ||
+ o->rate_iops_min[ddir])
+ return 1;
+
+ return 0;
}
-#ifdef FIO_INC_DEBUG
-static inline void dprint_io_u(struct io_u *io_u, const char *p)
+static inline int should_check_rate(struct thread_data *td,
+ unsigned long *bytes_done)
{
- struct fio_file *f = io_u->file;
+ int ret = 0;
- dprint(FD_IO, "%s: io_u %p: off=%llu/len=%lu/ddir=%d", p, io_u,
- (unsigned long long) io_u->offset,
- io_u->buflen, io_u->ddir);
- if (fio_debug & (1 << FD_IO)) {
- if (f)
- log_info("/%s", f->file_name);
+ if (bytes_done[0])
+ ret |= __should_check_rate(td, 0);
+ if (bytes_done[1])
+ ret |= __should_check_rate(td, 1);
- log_info("\n");
- }
+ return ret;
}
-#else
-#define dprint_io_u(io_u, p)
-#endif
-static inline int fio_fill_issue_time(struct thread_data *td)
+static inline int is_power_of_2(unsigned int val)
{
- if (td->o.read_iolog_file ||
- !td->o.disable_clat || !td->o.disable_slat || !td->o.disable_bw)
- return 1;
+ return (val != 0 && ((val & (val - 1)) == 0));
+}
- return 0;
+/*
+ * We currently only need to do locking if we have verifier threads
+ * accessing our internal structures too
+ */
+static inline void td_io_u_lock(struct thread_data *td)
+{
+ if (td->o.verify_async)
+ pthread_mutex_lock(&td->io_u_lock);
+}
+
+static inline void td_io_u_unlock(struct thread_data *td)
+{
+ if (td->o.verify_async)
+ pthread_mutex_unlock(&td->io_u_lock);
+}
+
+static inline void td_io_u_free_notify(struct thread_data *td)
+{
+ if (td->o.verify_async)
+ pthread_cond_signal(&td->free_cond);
}
#endif