#include "stat.h"
#include "flow.h"
-#ifdef FIO_HAVE_GUASI
-#include <guasi.h>
+#ifdef CONFIG_SOLARISAIO
+#include <sys/asynch.h>
#endif
-#ifdef FIO_HAVE_SOLARISAIO
-#include <sys/asynch.h>
+#ifdef CONFIG_LIBNUMA
+#include <linux/mempolicy.h>
+#include <numa.h>
+
+/*
+ * "local" is pseudo-policy
+ */
+#define MPOL_LOCAL MPOL_MAX
#endif
/*
RW_SEQ_IDENT,
};
+/*
+ * Thread behavior flag bits. Each value is a distinct power of two so
+ * the flags can be OR'd together and tested as a bitmask (presumably
+ * stored in thread_data.flags added below -- confirm against users).
+ */
+enum {
+	TD_F_VER_BACKLOG	= 1 << 0,
+	TD_F_TRIM_BACKLOG	= 1 << 1,
+	TD_F_READ_IOLOG		= 1 << 2,
+	TD_F_REFILL_BUFFERS	= 1 << 3,
+	TD_F_SCRAMBLE_BUFFERS	= 1 << 4,
+	TD_F_VER_NONE		= 1 << 5,
+	TD_F_PROFILE_OPS	= 1 << 6,
+};
+
+/*
+ * Offsets into the per-thread rand_seeds[] array -- one seed slot per
+ * randomness consumer (block size, verify, r/w mix, ...).
+ * FIO_RAND_NR_OFFS must stay last: it is the entry count and is used
+ * to size the rand_seeds[] array in struct thread_data.
+ */
+enum {
+	FIO_RAND_BS_OFF = 0,
+	FIO_RAND_VER_OFF,
+	FIO_RAND_MIX_OFF,
+	FIO_RAND_FILE_OFF,
+	FIO_RAND_BLOCK_OFF,
+	FIO_RAND_FILE_SIZE_OFF,
+	FIO_RAND_TRIM_OFF,
+	FIO_RAND_BUF_OFF,
+	FIO_RAND_NR_OFFS,	/* count -- keep last */
+};
+
/*
* This describes a single thread/process executing a fio job.
*/
struct thread_data {
struct thread_options o;
+ unsigned long flags;
void *eo;
char verror[FIO_VERROR_SIZE];
pthread_t thread;
char *sysfs_root;
- unsigned long rand_seeds[8];
+ unsigned long rand_seeds[FIO_RAND_NR_OFFS];
union {
os_random_state_t bsrange_state;
struct flist_head trim_list;
unsigned long trim_entries;
+ struct flist_head next_rand_list;
+
/*
* for fileservice, how often to switch to a new file
*/
int e = (err); \
(td)->error = e; \
if (!(td)->first_error) \
- snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
+ snprintf(td->verror, sizeof(td->verror), "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
} while (0)
extern int exitall_on_terminate;
extern unsigned int thread_number;
+extern unsigned int stat_number;
extern int shm_id;
extern int groupid;
extern int output_format;
extern int fio_gtod_offload;
extern int fio_gtod_cpu;
extern enum fio_cs fio_clock_source;
+extern int fio_clock_source_set;
extern int warnings_fatal;
extern int terse_version;
extern int is_backend;
assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)));
}
-#define BLOCKS_PER_MAP (8 * sizeof(unsigned long))
-#define TO_MAP_BLOCK(f, b) (b)
-#define RAND_MAP_IDX(f, b) (TO_MAP_BLOCK(f, b) / BLOCKS_PER_MAP)
-#define RAND_MAP_BIT(f, b) (TO_MAP_BLOCK(f, b) & (BLOCKS_PER_MAP - 1))
-
#define REAL_MAX_JOBS 2048
-#define td_non_fatal_error(e) ((e) == EIO || (e) == EILSEQ)
-
-static inline enum error_type td_error_type(enum fio_ddir ddir, int err)
+/*
+ * Map a failed request to its error_type_bit: EILSEQ counts as a verify
+ * failure; any other error is attributed to the request's data direction
+ * (read vs. write).
+ */
+static inline enum error_type_bit td_error_type(enum fio_ddir ddir, int err)
{
if (err == EILSEQ)
-		return ERROR_TYPE_VERIFY;
+		return ERROR_TYPE_VERIFY_BIT;
if (ddir == DDIR_READ)
-		return ERROR_TYPE_READ;
-	return ERROR_TYPE_WRITE;
+		return ERROR_TYPE_READ_BIT;
+	return ERROR_TYPE_WRITE_BIT;
+}
+
+/*
+ * Decide whether @err is non-fatal for error class @etype on this thread.
+ * Returns 1 (non-fatal, continue) or 0 (fatal). If the user supplied no
+ * ignore_error list for this class, a default list of {EIO, EILSEQ} is
+ * installed first; the error is then non-fatal only when the matching
+ * continue_on_error bit is set and @err appears in the class's list.
+ *
+ * NOTE(review): __NON_FATAL_ERR begins with a double underscore, a name
+ * reserved for the implementation in C -- consider renaming (e.g.
+ * non_fatal_errs) once all references are in view.
+ * NOTE(review): the lazy default-install mutates td->o from inside a
+ * predicate; presumably each td is only touched by its own thread --
+ * confirm.
+ */
+static int __NON_FATAL_ERR[] = {EIO, EILSEQ};
+static inline int td_non_fatal_error(struct thread_data *td,
+				     enum error_type_bit etype, int err)
+{
+	int i;
+	if (!td->o.ignore_error[etype]) {
+		td->o.ignore_error[etype] = __NON_FATAL_ERR;
+		td->o.ignore_error_nr[etype] = sizeof(__NON_FATAL_ERR)
+			/ sizeof(int);
+	}
+
+	if (!(td->o.continue_on_error & (1 << etype)))
+		return 0;
+	for (i = 0; i < td->o.ignore_error_nr[etype]; i++)
+		if (td->o.ignore_error[etype][i] == err)
+			return 1;
+	return 0;
}
static inline void update_error_count(struct thread_data *td, int err)
{
if (td->last_was_sync)
return 0;
- if (td->o.odirect)
- return 0;
if (td_write(td) || td_rw(td) || td->o.override_sync)
return 1;
TD_CREATED,
TD_INITIALIZED,
TD_RAMP,
+ TD_SETTING_UP,
TD_RUNNING,
TD_PRE_READING,
TD_VERIFYING,
}
static inline int should_check_rate(struct thread_data *td,
- unsigned long *bytes_done)
+ uint64_t *bytes_done)
{
int ret = 0;
FIO_OUTPUT_NORMAL,
};
+/* Selector values for the random offset distribution model. */
+enum {
+	FIO_RAND_DIST_RANDOM	= 0,
+	FIO_RAND_DIST_ZIPF	= 1,
+	FIO_RAND_DIST_PARETO	= 2,
+};
+
+/* Selector values for the random number generator engine. */
+enum {
+	FIO_RAND_GEN_TAUSWORTHE	= 0,
+	FIO_RAND_GEN_LFSR	= 1,
+};
+
#endif