#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
-#include "lib/bitmap.h"
+#include "lib/axmap.h"
struct io_completion_data {
int nr; /* input */
};
/*
- * The ->io_bitmap contains a map of blocks we have or have not done io
+ * The ->io_axmap contains a map of blocks we have or have not done io
* to yet. Used to make sure we cover the entire range in a fair fashion.
*/
static int random_map_free(struct fio_file *f, const unsigned long long block)
{
- return !bitmap_isset(f->io_bitmap, block);
+ return !axmap_isset(f->io_axmap, block);
}
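For orientation, here is a minimal sketch of how callers derive the block index probed above; the offset-to-block arithmetic is assumed from fio's usual min_bs scaling and is not part of this hunk:

	/* sketch: map a file offset to a block index, assuming min_bs granularity */
	unsigned long long block = (io_u->offset - f->file_offset) / min_bs;

	if (random_map_free(f, block)) {
		/* block has not seen io yet; it is a valid random target */
	}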
/*
 * Mark a given offset as used in the random map.
 */
nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
if (!(io_u->flags & IO_U_F_BUSY_OK))
- nr_blocks = bitmap_set_nr(f->io_bitmap, block, nr_blocks);
+ nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);
if ((nr_blocks * min_bs) < io_u->buflen)
io_u->buflen = nr_blocks * min_bs;
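If axmap_set_nr() can only claim a prefix of the requested range (a trailing block was already set), the clamp above shrinks the io to the blocks actually claimed. A worked example of the arithmetic:

	/*
	 * example: min_bs = 4096, buflen = 16384
	 *   nr_blocks = (16384 + 4096 - 1) / 4096 = 4
	 * if axmap_set_nr() returns 2 (the last two blocks were busy):
	 *   2 * 4096 = 8192 < 16384, so buflen is clamped to 8192
	 */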
dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", *b);
- *b = bitmap_next_free(f->io_bitmap, *b);
+ *b = axmap_next_free(f->io_axmap, *b);
if (*b == (uint64_t) -1ULL)
return 1;
ret:
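axmap_next_free() hands back the next unset block to try; the (uint64_t) -1ULL sentinel means no free block was found, so the caller reports failure (returns 1) instead of spinning on a full map.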
static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
- struct prof_io_ops *ops = &td->prof_io_ops;
+ if (td->flags & TD_F_PROFILE_OPS) {
+ struct prof_io_ops *ops = &td->prof_io_ops;
- if (ops->fill_io_u_off)
- return ops->fill_io_u_off(td, io_u);
+ if (ops->fill_io_u_off)
+ return ops->fill_io_u_off(td, io_u);
+ }
return __get_next_offset(td, io_u);
}
static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
- struct prof_io_ops *ops = &td->prof_io_ops;
+ if (td->flags & TD_F_PROFILE_OPS) {
+ struct prof_io_ops *ops = &td->prof_io_ops;
- if (ops->fill_io_u_size)
- return ops->fill_io_u_size(td, io_u);
+ if (ops->fill_io_u_size)
+ return ops->fill_io_u_size(td, io_u);
+ }
return __get_next_buflen(td, io_u);
}
static struct fio_file *get_next_file(struct thread_data *td)
{
- struct prof_io_ops *ops = &td->prof_io_ops;
+ if (td->flags & TD_F_PROFILE_OPS) {
+ struct prof_io_ops *ops = &td->prof_io_ops;
- if (ops->get_next_file)
- return ops->get_next_file(td);
+ if (ops->get_next_file)
+ return ops->get_next_file(td);
+ }
return __get_next_file(td);
}
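All three helpers now test one cached TD_F_PROFILE_OPS bit before touching prof_io_ops, rather than consulting option fields on every io. The flags are presumably populated once at job-setup time; a hedged sketch of that mapping (the init site and the td->o.profile trigger are assumptions, the option/flag pairs follow from the substitutions in this patch):

	/* sketch: one-time flag setup at job init (exact location assumed) */
	if (td->o.verify_backlog)
		td->flags |= TD_F_VER_BACKLOG;
	if (td->o.trim_backlog)
		td->flags |= TD_F_TRIM_BACKLOG;
	if (td->o.read_iolog_file)
		td->flags |= TD_F_READ_IOLOG;
	if (td->o.refill_buffers)
		td->flags |= TD_F_REFILL_BUFFERS;
	if (td->o.scramble_buffers)
		td->flags |= TD_F_SCRAMBLE_BUFFERS;
	if (td->o.verify != VERIFY_NONE)
		td->flags |= TD_F_VER_NONE;	/* set when verify is enabled, despite the name */
	if (td->o.profile)
		td->flags |= TD_F_PROFILE_OPS;	/* assumption: selecting a profile sets this */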
static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
- if (td->o.trim_backlog && td->trim_entries) {
+ if (!(td->flags & TD_F_TRIM_BACKLOG))
+ return 0;
+
+ if (td->trim_entries) {
int get_trim = 0;
if (td->trim_batch) {
static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
- if (td->o.verify_backlog && td->io_hist_len) {
+ if (!(td->flags & TD_F_VER_BACKLOG))
+ return 0;
+
+ if (td->io_hist_len) {
int get_verify = 0;
if (td->verify_batch)
/*
* If using an iolog, grab next piece if any available.
*/
- if (td->o.read_iolog_file) {
+ if (td->flags & TD_F_READ_IOLOG) {
if (read_iolog_get(td, io_u))
goto err_put;
} else if (set_io_u_file(td, io_u)) {
f->last_pos = io_u->offset + io_u->buflen;
if (io_u->ddir == DDIR_WRITE) {
- if (td->o.refill_buffers) {
+ if (td->flags & TD_F_REFILL_BUFFERS) {
io_u_fill_buffer(td, io_u,
io_u->xfer_buflen, io_u->xfer_buflen);
- } else if (td->o.scramble_buffers)
+ } else if (td->flags & TD_F_SCRAMBLE_BUFFERS)
do_scramble = 1;
- if (td->o.verify != VERIFY_NONE) {
+ if (td->flags & TD_F_VER_NONE) {
populate_verify_io_u(td, io_u);
do_scramble = 0;
}
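Note on the last hunk: TD_F_VER_NONE replaces the old check verify != VERIFY_NONE, meaning the flag is set when verification is enabled. The test above therefore preserves the original behavior, even though the flag name reads as the opposite.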