#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
-#include "lib/bitmap.h"
+#include "lib/axmap.h"
struct io_completion_data {
int nr; /* input */
};
/*
- * The ->io_bitmap contains a map of blocks we have or have not done io
+ * The ->io_axmap contains a map of blocks we have or have not done io
* to yet. Used to make sure we cover the entire range in a fair fashion.
*/
static int random_map_free(struct fio_file *f, const unsigned long long block)
{
-	return !bitmap_isset(f->io_bitmap, block);
+	/* true if no io has been issued to 'block' yet (tracked in ->io_axmap) */
+	return !axmap_isset(f->io_axmap, block);
}
/*
nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
if (!(io_u->flags & IO_U_F_BUSY_OK))
- nr_blocks = bitmap_set_nr(f->io_bitmap, block, nr_blocks);
+ nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);
if ((nr_blocks * min_bs) < io_u->buflen)
io_u->buflen = nr_blocks * min_bs;
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, unsigned long long *b)
{
- unsigned long long rmax, r, lastb;
+ unsigned long long r;
- lastb = last_block(td, f, ddir);
- if (!lastb)
- return 1;
+ if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
+ unsigned long long rmax, lastb;
- rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;
+ lastb = last_block(td, f, ddir);
+ if (!lastb)
+ return 1;
- if (td->o.use_os_rand) {
- rmax = OS_RAND_MAX;
- r = os_random_long(&td->random_state);
+ rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;
+
+ if (td->o.use_os_rand) {
+ rmax = OS_RAND_MAX;
+ r = os_random_long(&td->random_state);
+ } else {
+ rmax = FRAND_MAX;
+ r = __rand(&td->__random_state);
+ }
+
+ dprint(FD_RANDOM, "off rand %llu\n", r);
+
+ *b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0));
} else {
- rmax = FRAND_MAX;
- r = __rand(&td->__random_state);
- }
+ uint64_t off = 0;
- *b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0));
+ if (lfsr_next(&f->lfsr, &off))
+ return 1;
- dprint(FD_RANDOM, "off rand %llu\n", r);
+ *b = off;
+ }
/*
* if we are not maintaining a random map, we are done.
dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", *b);
- *b = bitmap_next_free(f->io_bitmap, *b);
+ *b = axmap_next_free(f->io_axmap, *b);
if (*b == (uint64_t) -1ULL)
return 1;
ret:
static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
-	struct prof_io_ops *ops = &td->prof_io_ops;
+	/* an active profile may override offset generation */
+	if ((td->flags & TD_F_PROFILE_OPS) && td->prof_io_ops.fill_io_u_off)
+		return td->prof_io_ops.fill_io_u_off(td, io_u);
-	if (ops->fill_io_u_off)
-		return ops->fill_io_u_off(td, io_u);
	return __get_next_offset(td, io_u);
}
static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
-	struct prof_io_ops *ops = &td->prof_io_ops;
+	/* an active profile may override buffer length selection */
+	if ((td->flags & TD_F_PROFILE_OPS) && td->prof_io_ops.fill_io_u_size)
+		return td->prof_io_ops.fill_io_u_size(td, io_u);
-	if (ops->fill_io_u_size)
-		return ops->fill_io_u_size(td, io_u);
	return __get_next_buflen(td, io_u);
}
static struct fio_file *get_next_file(struct thread_data *td)
{
-	struct prof_io_ops *ops = &td->prof_io_ops;
+	/*
+	 * FIX: the TD_F_PROFILE_OPS test was negated here, unlike the
+	 * matching checks added in get_next_offset()/get_next_buflen(),
+	 * so a profile's get_next_file hook could never run while profile
+	 * ops were enabled. Consult the hook only when the flag is set.
+	 */
+	if (td->flags & TD_F_PROFILE_OPS) {
+		struct prof_io_ops *ops = &td->prof_io_ops;
-	if (ops->get_next_file)
-		return ops->get_next_file(td);
+		if (ops->get_next_file)
+			return ops->get_next_file(td);
+	}
	return __get_next_file(td);
}
static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
- if (td->o.trim_backlog && td->trim_entries) {
+ if (!(td->flags & TD_F_TRIM_BACKLOG))
+ return 0;
+
+ if (td->trim_entries) {
int get_trim = 0;
if (td->trim_batch) {
static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
- if (td->o.verify_backlog && td->io_hist_len) {
+ if (!(td->flags & TD_F_VER_BACKLOG))
+ return 0;
+
+ if (td->io_hist_len) {
int get_verify = 0;
if (td->verify_batch)
/*
* If using an iolog, grab next piece if any available.
*/
- if (td->o.read_iolog_file) {
+ if (td->flags & TD_F_READ_IOLOG) {
if (read_iolog_get(td, io_u))
goto err_put;
} else if (set_io_u_file(td, io_u)) {
f->last_pos = io_u->offset + io_u->buflen;
if (io_u->ddir == DDIR_WRITE) {
- if (td->o.refill_buffers) {
+ if (td->flags & TD_F_REFILL_BUFFERS) {
io_u_fill_buffer(td, io_u,
io_u->xfer_buflen, io_u->xfer_buflen);
- } else if (td->o.scramble_buffers)
+ } else if (td->flags & TD_F_SCRAMBLE_BUFFERS)
do_scramble = 1;
- if (td->o.verify != VERIFY_NONE) {
+ if (td->flags & TD_F_VER_NONE) {
populate_verify_io_u(td, io_u);
do_scramble = 0;
}