#include <sys/asynch.h>
#endif
+#ifdef FIO_HAVE_LIBNUMA
+#include <linux/mempolicy.h>
+#include <numa.h>
+
+/*
+ * "local" is a pseudo-policy; alias it just past the last real
+ * MPOL_* value, since the kernel header does not define it.
+ */
+#define MPOL_LOCAL MPOL_MAX
+#endif
+
/*
* What type of allocation to use for io buffers
*/
/*
* What type of errors to continue on when continue_on_error is used
*/
+/*
+ * Bit index for each error class; ERROR_TYPE_CNT sizes the per-class
+ * ignore_error[] / ignore_error_nr[] tables in the thread options.
+ */
+enum error_type_bit {
+	ERROR_TYPE_READ_BIT = 0,
+	ERROR_TYPE_WRITE_BIT = 1,
+	ERROR_TYPE_VERIFY_BIT = 2,
+	ERROR_TYPE_CNT = 3,
+};
+
 enum error_type {
 	ERROR_TYPE_NONE = 0,
-	ERROR_TYPE_READ = 1 << 0,
-	ERROR_TYPE_WRITE = 1 << 1,
-	ERROR_TYPE_VERIFY = 1 << 2,
+	/* flag values derived from the bit indices in error_type_bit */
+	ERROR_TYPE_READ = 1 << ERROR_TYPE_READ_BIT,
+	ERROR_TYPE_WRITE = 1 << ERROR_TYPE_WRITE_BIT,
+	ERROR_TYPE_VERIFY = 1 << ERROR_TYPE_VERIFY_BIT,
+	/* mask that matches every error class */
 	ERROR_TYPE_ANY = 0xffff,
 };
struct bssplit *bssplit[DDIR_RWDIR_CNT];
unsigned int bssplit_nr[DDIR_RWDIR_CNT];
+ int *ignore_error[ERROR_TYPE_CNT];
+ unsigned int ignore_error_nr[ERROR_TYPE_CNT];
+ unsigned int error_dump;
+
unsigned int nr_files;
unsigned int open_files;
enum file_lock_mode file_lock_mode;
unsigned int bs_unaligned;
unsigned int fsync_on_close;
+ unsigned int random_distribution;
+ double zipf_theta;
+ double pareto_h;
+
+ unsigned int random_generator;
+
unsigned int hugepage_size;
unsigned int rw_min_bs;
unsigned int thinktime;
enum fio_memtype mem_type;
unsigned int mem_align;
+ unsigned int max_latency;
+
unsigned int stonewall;
unsigned int new_group;
unsigned int numjobs;
unsigned int cpumask_set;
os_cpu_mask_t verify_cpumask;
unsigned int verify_cpumask_set;
+#ifdef FIO_HAVE_LIBNUMA
+ struct bitmask *numa_cpunodesmask;
+ unsigned int numa_cpumask_set;
+ unsigned short numa_mem_mode;
+ unsigned int numa_mem_prefer_node;
+ struct bitmask *numa_memnodesmask;
+ unsigned int numa_memmask_set;
+#endif
unsigned int iolog;
unsigned int rwmixcycle;
unsigned int rwmix[2];
extern int exitall_on_terminate;
extern unsigned int thread_number;
+extern unsigned int stat_number;
extern unsigned int nr_process, nr_thread;
extern int shm_id;
extern int groupid;
assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)));
}
-#define BLOCKS_PER_MAP (8 * sizeof(unsigned long))
-#define TO_MAP_BLOCK(f, b) (b)
-#define RAND_MAP_IDX(f, b) (TO_MAP_BLOCK(f, b) / BLOCKS_PER_MAP)
-#define RAND_MAP_BIT(f, b) (TO_MAP_BLOCK(f, b) & (BLOCKS_PER_MAP - 1))
-
#define REAL_MAX_JOBS 2048
-#define td_non_fatal_error(e) ((e) == EIO || (e) == EILSEQ)
-
-static inline enum error_type td_error_type(enum fio_ddir ddir, int err)
+/*
+ * Classify an error by (direction, errno): EILSEQ is a verify failure,
+ * reads map to the read class, everything else to the write class.
+ * Returns the error_type_bit index, not the error_type flag.
+ */
+static inline enum error_type_bit td_error_type(enum fio_ddir ddir, int err)
 {
 	if (err == EILSEQ)
-		return ERROR_TYPE_VERIFY;
+		return ERROR_TYPE_VERIFY_BIT;
 	if (ddir == DDIR_READ)
-		return ERROR_TYPE_READ;
-		return ERROR_TYPE_WRITE;
+		return ERROR_TYPE_READ_BIT;
+		return ERROR_TYPE_WRITE_BIT;
+}
+
+/* errnos ignored by default when continue_on_error is enabled */
+static int non_fatal_errs[] = { EIO, EILSEQ };
+
+/*
+ * Return 1 if 'err', classified as 'etype', should be ignored so the
+ * job can continue; 0 if it must be treated as fatal.  If no explicit
+ * ignore_error list was configured for this class, fall back to the
+ * default list above.  Note: identifiers starting with a double
+ * underscore are reserved by the C standard, hence the rename from
+ * __NON_FATAL_ERR; the loop index is unsigned to match ignore_error_nr.
+ */
+static inline int td_non_fatal_error(struct thread_data *td,
+				     enum error_type_bit etype, int err)
+{
+	unsigned int i;
+
+	if (!td->o.ignore_error[etype]) {
+		td->o.ignore_error[etype] = non_fatal_errs;
+		td->o.ignore_error_nr[etype] = sizeof(non_fatal_errs)
+						/ sizeof(int);
+	}
+
+	if (!(td->o.continue_on_error & (1 << etype)))
+		return 0;
+
+	for (i = 0; i < td->o.ignore_error_nr[etype]; i++)
+		if (td->o.ignore_error[etype][i] == err)
+			return 1;
+
+	return 0;
+}
static inline void update_error_count(struct thread_data *td, int err)
TD_NOT_CREATED = 0,
TD_CREATED,
TD_INITIALIZED,
+ TD_SETTING_UP,
TD_RAMP,
TD_RUNNING,
TD_PRE_READING,
FIO_OUTPUT_NORMAL,
};
+/* distribution used to pick random offsets (random_distribution field) */
+enum {
+	FIO_RAND_DIST_RANDOM = 0,
+	FIO_RAND_DIST_ZIPF,
+	FIO_RAND_DIST_PARETO,
+};
+
+/* PRNG backing the offset generator (random_generator field) */
+enum {
+	FIO_RAND_GEN_TAUSWORTHE = 0,
+	FIO_RAND_GEN_LFSR,
+};
+
#endif