lib/num2str.c lib/ieee754.c $(wildcard crc/*.c) engines/cpu.c \
engines/mmap.c engines/sync.c engines/null.c engines/net.c \
memalign.c server.c client.c iolog.c backend.c libfio.c flow.c \
- json.c lib/zipf.c gettime-thread.c
+ json.c lib/zipf.c lib/axmap.c lib/lfsr.c gettime-thread.c
ifeq ($(UNAME), Linux)
SOURCE += diskutil.c fifo.c blktrace.c helpers.c cgroup.c trim.c \
T_ZIPF_OBJS += t/log.o lib/ieee754.o lib/rand.o lib/zipf.o t/genzipf.o
T_ZIPF_PROGS = t/genzipf
+T_AXMAP_OBJS = t/axmap.o
+T_AXMAP_OBJS += lib/lfsr.o lib/axmap.o
+T_AXMAP_PROGS = t/axmap
+
T_OBJS = $(T_SMALLOC_OBJS)
T_OBJS += $(T_IEEE_OBJS)
T_OBJS += $(T_ZIPF_OBJS)
+T_OBJS += $(T_AXMAP_OBJS)
T_PROGS = $(T_SMALLOC_PROGS)
T_PROGS += $(T_IEEE_PROGS)
T_PROGS += $(T_ZIPF_PROGS)
+T_PROGS += $(T_AXMAP_PROGS)
ifneq ($(findstring $(MAKEFLAGS),s),s)
ifndef V
t/genzipf: $(T_ZIPF_OBJS)
$(QUIET_CC)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_ZIPF_OBJS) $(LIBS) $(LDFLAGS)
+t/axmap: $(T_AXMAP_OBJS)
+ $(QUIET_CC)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(T_AXMAP_OBJS) $(LIBS) $(LDFLAGS)
+
fio: $(OBJS)
$(QUIET_CC)$(CC) $(LDFLAGS) $(CFLAGS) -o $@ $(OBJS) $(LIBS) $(LDFLAGS)
#include "io_ddir.h"
#include "flist.h"
#include "lib/zipf.h"
+#include "lib/axmap.h"
+#include "lib/lfsr.h"
/*
* The type of object we are working on
/*
* block map for random io
*/
- unsigned long *file_map;
- unsigned long num_maps;
- unsigned long last_free_lookup;
- unsigned failed_rands;
+ struct axmap *io_axmap;
+
+ struct fio_lfsr lfsr;
/*
* Used for zipf random distribution
static inline void fio_file_reset(struct fio_file *f)
{
- f->last_free_lookup = 0;
- f->failed_rands = 0;
f->last_pos = f->file_offset;
f->last_start = -1ULL;
f->file_pos = -1ULL;
- if (f->file_map)
- memset(f->file_map, 0, f->num_maps * sizeof(unsigned long));
+ if (f->io_axmap)
+ axmap_reset(f->io_axmap);
}
#endif
#include "filehash.h"
#include "os/os.h"
#include "hash.h"
+#include "lib/axmap.h"
#ifdef FIO_HAVE_LINUX_FALLOCATE
#include <linux/falloc.h>
int init_random_map(struct thread_data *td)
{
- unsigned long long blocks, num_maps;
+ unsigned long long blocks;
struct fio_file *f;
unsigned int i;
if (init_rand_distribution(td))
return 0;
- if (td->o.norandommap || !td_random(td))
+ if (!td_random(td))
return 0;
for_each_file(td, f, i) {
blocks = (f->real_file_size + td->o.rw_min_bs - 1) /
(unsigned long long) td->o.rw_min_bs;
- num_maps = (blocks + BLOCKS_PER_MAP - 1) /
- (unsigned long long) BLOCKS_PER_MAP;
- if (num_maps == (unsigned long) num_maps) {
- f->file_map = smalloc(num_maps * sizeof(unsigned long));
- if (f->file_map) {
- f->num_maps = num_maps;
+ if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
+ if (!lfsr_init(&f->lfsr, blocks))
continue;
- }
- } else
- f->file_map = NULL;
+ } else if (!td->o.norandommap) {
+ f->io_axmap = axmap_new(blocks);
+ if (f->io_axmap)
+ continue;
+ } else if (td->o.norandommap)
+ continue;
if (!td->o.softrandommap) {
log_err("fio: failed allocating random map. If running"
log_info("fio: file %s failed allocating random map. Running "
"job without.\n", f->file_name);
- f->num_maps = 0;
}
return 0;
sfree(f->file_name);
f->file_name = NULL;
- sfree(f->file_map);
- f->file_map = NULL;
+ axmap_free(f->io_axmap);
+ f->io_axmap = NULL;
sfree(f);
}
double zipf_theta;
double pareto_h;
+ unsigned int random_generator;
+
unsigned int hugepage_size;
unsigned int rw_min_bs;
unsigned int thinktime;
assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)));
}
-#define BLOCKS_PER_MAP (8 * sizeof(unsigned long))
-#define TO_MAP_BLOCK(f, b) (b)
-#define RAND_MAP_IDX(f, b) (TO_MAP_BLOCK(f, b) / BLOCKS_PER_MAP)
-#define RAND_MAP_BIT(f, b) (TO_MAP_BLOCK(f, b) & (BLOCKS_PER_MAP - 1))
-
#define REAL_MAX_JOBS 2048
static inline enum error_type_bit td_error_type(enum fio_ddir ddir, int err)
FIO_RAND_DIST_PARETO,
};
+enum {
+ FIO_RAND_GEN_TAUSWORTHE = 0,
+ FIO_RAND_GEN_LFSR,
+};
+
#endif
#define td_trim(td) ((td)->o.td_ddir & TD_DDIR_TRIM)
#define td_rw(td) (((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
#define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
-#define file_randommap(td, f) (!(td)->o.norandommap && (f)->file_map)
+#define file_randommap(td, f) (!(td)->o.norandommap && (f)->io_axmap)
static inline int ddir_sync(enum fio_ddir ddir)
{
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
+#include "lib/axmap.h"
struct io_completion_data {
int nr; /* input */
};
/*
- * The ->file_map[] contains a map of blocks we have or have not done io
+ * The ->io_axmap contains a map of blocks we have or have not done io
* to yet. Used to make sure we cover the entire range in a fair fashion.
*/
static int random_map_free(struct fio_file *f, const unsigned long long block)
{
- unsigned int idx = RAND_MAP_IDX(f, block);
- unsigned int bit = RAND_MAP_BIT(f, block);
-
- dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit);
-
- return (f->file_map[idx] & (1UL << bit)) == 0;
+ return !axmap_isset(f->io_axmap, block);
}
/*
unsigned int min_bs = td->o.rw_min_bs;
struct fio_file *f = io_u->file;
unsigned long long block;
- unsigned int blocks, nr_blocks;
- int busy_check;
+ unsigned int nr_blocks;
block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
- blocks = 0;
- busy_check = !(io_u->flags & IO_U_F_BUSY_OK);
-
- while (nr_blocks) {
- unsigned int idx, bit;
- unsigned long mask, this_blocks;
-
- /*
- * If we have a mixed random workload, we may
- * encounter blocks we already did IO to.
- */
- if (!busy_check) {
- blocks = nr_blocks;
- break;
- }
- if ((td->o.ddir_seq_nr == 1) && !random_map_free(f, block))
- break;
- idx = RAND_MAP_IDX(f, block);
- bit = RAND_MAP_BIT(f, block);
-
- fio_assert(td, idx < f->num_maps);
-
- this_blocks = nr_blocks;
- if (this_blocks + bit > BLOCKS_PER_MAP)
- this_blocks = BLOCKS_PER_MAP - bit;
-
- do {
- if (this_blocks == BLOCKS_PER_MAP)
- mask = -1UL;
- else
- mask = ((1UL << this_blocks) - 1) << bit;
-
- if (!(f->file_map[idx] & mask))
- break;
-
- this_blocks--;
- } while (this_blocks);
-
- if (!this_blocks)
- break;
+ if (!(io_u->flags & IO_U_F_BUSY_OK))
+ nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);
- f->file_map[idx] |= mask;
- nr_blocks -= this_blocks;
- blocks += this_blocks;
- block += this_blocks;
- }
-
- if ((blocks * min_bs) < io_u->buflen)
- io_u->buflen = blocks * min_bs;
+ if ((nr_blocks * min_bs) < io_u->buflen)
+ io_u->buflen = nr_blocks * min_bs;
}
static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
return max_blocks;
}
-/*
- * Return the next free block in the map.
- */
-static int get_next_free_block(struct thread_data *td, struct fio_file *f,
- enum fio_ddir ddir, unsigned long long *b)
-{
- unsigned long long block, min_bs = td->o.rw_min_bs, lastb;
- int i;
-
- lastb = last_block(td, f, ddir);
- if (!lastb)
- return 1;
-
- i = f->last_free_lookup;
- block = i * BLOCKS_PER_MAP;
- while (block * min_bs < f->real_file_size &&
- block * min_bs < f->io_size) {
- if (f->file_map[i] != -1UL) {
- block += ffz(f->file_map[i]);
- if (block > lastb)
- break;
- f->last_free_lookup = i;
- *b = block;
- return 0;
- }
-
- block += BLOCKS_PER_MAP;
- i++;
- }
-
- dprint(FD_IO, "failed finding a free block\n");
- return 1;
-}
-
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, unsigned long long *b)
{
- unsigned long long rmax, r, lastb;
- int loops = 5;
+ unsigned long long r;
- lastb = last_block(td, f, ddir);
- if (!lastb)
- return 1;
+ if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
+ unsigned long long rmax, lastb;
- if (f->failed_rands >= 200)
- goto ffz;
+ lastb = last_block(td, f, ddir);
+ if (!lastb)
+ return 1;
- rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;
- do {
- if (td->o.use_os_rand)
+ rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;
+
+ if (td->o.use_os_rand) {
+ rmax = OS_RAND_MAX;
r = os_random_long(&td->random_state);
- else
+ } else {
+ rmax = FRAND_MAX;
r = __rand(&td->__random_state);
-
- *b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0));
+ }
dprint(FD_RANDOM, "off rand %llu\n", r);
+ *b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0));
+ } else {
+ uint64_t off = 0;
- /*
- * if we are not maintaining a random map, we are done.
- */
- if (!file_randommap(td, f))
- goto ret_good;
-
- /*
- * calculate map offset and check if it's free
- */
- if (random_map_free(f, *b))
- goto ret_good;
-
- dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
- *b);
- } while (--loops);
+ if (lfsr_next(&f->lfsr, &off))
+ return 1;
- if (!f->failed_rands++)
- f->last_free_lookup = 0;
+ *b = off;
+ }
/*
- * we get here, if we didn't suceed in looking up a block. generate
- * a random start offset into the filemap, and find the first free
- * block from there.
+ * if we are not maintaining a random map, we are done.
*/
- loops = 10;
- do {
- f->last_free_lookup = (f->num_maps - 1) *
- (r / ((unsigned long long) rmax + 1.0));
- if (!get_next_free_block(td, f, ddir, b))
- goto ret;
-
- if (td->o.use_os_rand)
- r = os_random_long(&td->random_state);
- else
- r = __rand(&td->__random_state);
- } while (--loops);
+ if (!file_randommap(td, f))
+ goto ret;
/*
- * that didn't work either, try exhaustive search from the start
+ * calculate map offset and check if it's free
*/
- f->last_free_lookup = 0;
-ffz:
- if (!get_next_free_block(td, f, ddir, b))
- return 0;
- f->last_free_lookup = 0;
- return get_next_free_block(td, f, ddir, b);
-ret_good:
- f->failed_rands = 0;
+ if (random_map_free(f, *b))
+ goto ret;
+
+ dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", *b);
+
+ *b = axmap_next_free(f->io_axmap, *b);
+ if (*b == (uint64_t) -1ULL)
+ return 1;
ret:
return 0;
}
else if (b != -1ULL)
io_u->offset = b * td->o.ba[ddir];
else {
- log_err("fio: bug in offset generation\n");
+ log_err("fio: bug in offset generation: offset=%llu, b=%llu\n",
+ offset, b);
ret = 1;
}
}
--- /dev/null
+/*
+ * Bitmap of bitmaps, where each layer is number-of-bits-per-word smaller than
+ * the previous. Hence an 'axmap', since we axe each previous layer into a
+ * much smaller piece. I swear, that is why it's named like that. It has
+ * nothing to do with anything remotely narcissistic.
+ *
+ * A set bit at layer N indicates a full word at layer N-1, and so forth. As
+ * the bitmap becomes progressively more full, checking for existance
+ * becomes cheaper (since fewer layers are walked, making it a lot more
+ * cache friendly) and locating the next free space likewise.
+ *
+ * Axmaps get pretty close to optimal (1 bit per block) space usage, since
+ * layers quickly diminish in size. Doing the size math is straight forward,
+ * since we have log64(blocks) layers of maps. For 20000 blocks, overhead
+ * is roughly 1.9%, or 1.019 bits per block. The number quickly converges
+ * towards 1.0158, or 1.58% of overhead.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "../arch/arch.h"
+#include "axmap.h"
+#include "../smalloc.h"
+#include "../minmax.h"
+
+#if BITS_PER_LONG == 64
+#define UNIT_SHIFT 6
+#elif BITS_PER_LONG == 32
+#define UNIT_SHIFT 5
+#else
+#error "Number of arch bits unknown"
+#endif
+
+#define BLOCKS_PER_UNIT (1UL << UNIT_SHIFT)
+#define BLOCKS_PER_UNIT_MASK (BLOCKS_PER_UNIT - 1)
+
+#define firstfree_valid(b) ((b)->first_free != (uint64_t) -1)
+
+struct axmap_level {
+ int level;
+ unsigned long map_size;
+ unsigned long *map;
+};
+
+struct axmap {
+ unsigned int nr_levels;
+ struct axmap_level *levels;
+ uint64_t first_free;
+};
+
+static unsigned long ulog64(unsigned long val, unsigned int log)
+{
+ while (log-- && val)
+ val >>= UNIT_SHIFT;
+
+ return val;
+}
+
+void axmap_reset(struct axmap *axmap)
+{
+ int i;
+
+ for (i = 0; i < axmap->nr_levels; i++) {
+ struct axmap_level *al = &axmap->levels[i];
+
+ memset(al->map, 0, al->map_size * sizeof(unsigned long));
+ }
+}
+
+void axmap_free(struct axmap *axmap)
+{
+ unsigned int i;
+
+ if (!axmap)
+ return;
+
+ for (i = 0; i < axmap->nr_levels; i++)
+ sfree(axmap->levels[i].map);
+
+ sfree(axmap->levels);
+ sfree(axmap);
+}
+
+struct axmap *axmap_new(unsigned long nr_bits)
+{
+ struct axmap *axmap;
+ unsigned int i, levels;
+
+ axmap = smalloc(sizeof(*axmap));
+ if (!axmap)
+ return NULL;
+
+ levels = 1;
+ i = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
+ while (i > 1) {
+ i = (i + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
+ levels++;
+ }
+
+ axmap->nr_levels = levels;
+ axmap->levels = smalloc(axmap->nr_levels * sizeof(struct axmap_level));
+ axmap->first_free = 0;
+
+ for (i = 0; i < axmap->nr_levels; i++) {
+ struct axmap_level *al = &axmap->levels[i];
+
+ al->level = i;
+ al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
+ al->map = smalloc(al->map_size * sizeof(unsigned long));
+ if (!al->map)
+ goto err;
+
+ nr_bits = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
+ }
+
+ axmap_reset(axmap);
+ return axmap;
+err:
+ for (i = 0; i < axmap->nr_levels; i++)
+ if (axmap->levels[i].map)
+ sfree(axmap->levels[i].map);
+
+ sfree(axmap->levels);
+ return NULL;
+}
+
+static int axmap_handler(struct axmap *axmap, uint64_t bit_nr,
+ int (*func)(struct axmap_level *, unsigned long, unsigned int,
+ void *), void *data)
+{
+ struct axmap_level *al;
+ int i;
+
+ for (i = 0; i < axmap->nr_levels; i++) {
+ unsigned long index = ulog64(bit_nr, i);
+ unsigned long offset = index >> UNIT_SHIFT;
+ unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
+
+ al = &axmap->levels[i];
+
+ if (func(al, offset, bit, data))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
+ int (*func)(struct axmap_level *, unsigned long, unsigned int, void *),
+ void *data)
+{
+ struct axmap_level *al;
+ int i, level = axmap->nr_levels;
+
+ for (i = axmap->nr_levels - 1; i >= 0; i--) {
+ unsigned long index = ulog64(bit_nr, --level);
+ unsigned long offset = index >> UNIT_SHIFT;
+ unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
+
+ al = &axmap->levels[i];
+
+ if (func(al, offset, bit, data))
+ return 1;
+ }
+
+ return 0;
+}
+
+static int axmap_clear_fn(struct axmap_level *al, unsigned long offset,
+ unsigned int bit, void *unused)
+{
+ if (!(al->map[offset] & (1UL << bit)))
+ return 1;
+
+ al->map[offset] &= ~(1UL << bit);
+ return 0;
+}
+
+void axmap_clear(struct axmap *axmap, uint64_t bit_nr)
+{
+ axmap_handler(axmap, bit_nr, axmap_clear_fn, NULL);
+}
+
+struct axmap_set_data {
+ unsigned int nr_bits;
+ unsigned int set_bits;
+ unsigned int fail_ok;
+};
+
+static unsigned long bit_masks[] = {
+ 0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
+ 0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
+ 0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
+ 0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
+ 0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
+ 0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
+ 0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
+ 0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
+ 0x00000000ffffffff,
+#if BITS_PER_LONG == 64
+ 0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
+ 0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
+ 0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
+ 0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
+ 0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
+ 0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
+ 0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
+ 0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
+#endif
+};
+
+static int axmap_set_fn(struct axmap_level *al, unsigned long offset,
+ unsigned int bit, void *__data)
+{
+ struct axmap_set_data *data = __data;
+ unsigned long mask, overlap;
+ unsigned int nr_bits;
+
+ nr_bits = min(data->nr_bits, BLOCKS_PER_UNIT - bit);
+
+ mask = bit_masks[nr_bits] << bit;
+
+ /*
+ * Mask off any potential overlap, only sets contig regions
+ */
+ overlap = al->map[offset] & mask;
+ if (overlap == mask) {
+ assert(data->fail_ok);
+ return 1;
+ }
+
+ while (overlap) {
+ unsigned long clear_mask = ~(1UL << ffz(~overlap));
+
+ mask &= clear_mask;
+ overlap &= clear_mask;
+ nr_bits--;
+ }
+
+ assert(mask);
+ assert(!(al->map[offset] & mask));
+
+ al->map[offset] |= mask;
+
+ if (!al->level)
+ data->set_bits = nr_bits;
+
+ data->nr_bits = 1;
+ return al->map[offset] != -1UL;
+}
+
+static void __axmap_set(struct axmap *axmap, uint64_t bit_nr,
+ struct axmap_set_data *data)
+{
+ unsigned int set_bits, nr_bits = data->nr_bits;
+
+ if (axmap->first_free >= bit_nr &&
+ axmap->first_free < bit_nr + data->nr_bits)
+ axmap->first_free = -1ULL;
+
+ set_bits = 0;
+ while (nr_bits) {
+ axmap_handler(axmap, bit_nr, axmap_set_fn, data);
+ set_bits += data->set_bits;
+
+ if (data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
+ break;
+
+ nr_bits -= data->set_bits;
+ bit_nr += data->set_bits;
+
+ data->nr_bits = nr_bits;
+ data->fail_ok = 1;
+ }
+
+ data->set_bits = set_bits;
+}
+
+void axmap_set(struct axmap *axmap, uint64_t bit_nr)
+{
+ struct axmap_set_data data = { .nr_bits = 1, };
+
+ __axmap_set(axmap, bit_nr, &data);
+}
+
+unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr, unsigned int nr_bits)
+{
+ struct axmap_set_data data = { .nr_bits = nr_bits, };
+
+ __axmap_set(axmap, bit_nr, &data);
+ return data.set_bits;
+}
+
+static int axmap_isset_fn(struct axmap_level *al, unsigned long offset,
+ unsigned int bit, void *unused)
+{
+ return (al->map[offset] & (1UL << bit)) != 0;
+}
+
+int axmap_isset(struct axmap *axmap, uint64_t bit_nr)
+{
+ return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
+}
+
+static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
+ uint64_t index)
+{
+ unsigned long j;
+ int i;
+
+ /*
+ * Start at the bottom, then converge towards first free bit at the top
+ */
+ for (i = level; i >= 0; i--) {
+ struct axmap_level *al = &axmap->levels[i];
+
+ if (index >= al->map_size) {
+ index = -1ULL;
+ break;
+ }
+
+ for (j = index; j < al->map_size; j++) {
+ if (al->map[j] == -1UL)
+ continue;
+
+ /*
+ * First free bit here is our index into the first
+ * free bit at the next higher level
+ */
+ index = (j << UNIT_SHIFT) + ffz(al->map[j]);
+ break;
+ }
+ }
+
+ return index;
+}
+
+uint64_t axmap_first_free(struct axmap *axmap)
+{
+ if (firstfree_valid(axmap))
+ return axmap->first_free;
+
+ axmap->first_free = axmap_find_first_free(axmap, axmap->nr_levels - 1, 0);
+ return axmap->first_free;
+}
+
+struct axmap_next_free_data {
+ unsigned int level;
+ unsigned long offset;
+ uint64_t bit;
+};
+
+static int axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
+ unsigned int bit, void *__data)
+{
+ struct axmap_next_free_data *data = __data;
+ uint64_t mask = ~((1UL << ((data->bit & BLOCKS_PER_UNIT_MASK) + 1)) - 1);
+
+ if (!(mask & al->map[offset]))
+ return 0;
+
+ if (al->map[offset] != -1UL) {
+ data->level = al->level;
+ data->offset = offset;
+ return 1;
+ }
+
+ data->bit = (data->bit + BLOCKS_PER_UNIT - 1) / BLOCKS_PER_UNIT;
+ return 0;
+}
+
+/*
+ * 'bit_nr' is already set. Find the next free bit after this one.
+ */
+uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
+{
+ struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };
+
+ if (firstfree_valid(axmap) && bit_nr < axmap->first_free)
+ return axmap->first_free;
+
+ if (!axmap_handler(axmap, bit_nr, axmap_next_free_fn, &data))
+ return axmap_first_free(axmap);
+
+ assert(data.level != -1U);
+
+ return axmap_find_first_free(axmap, data.level, data.offset);
+}
--- /dev/null
+#ifndef FIO_BITMAP_H
+#define FIO_BITMAP_H
+
+#include <inttypes.h>
+
+struct axmap;
+struct axmap *axmap_new(unsigned long nr_bits);
+void axmap_free(struct axmap *bm);
+
+void axmap_clear(struct axmap *axmap, uint64_t bit_nr);
+void axmap_set(struct axmap *axmap, uint64_t bit_nr);
+unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr, unsigned int nr_bits);
+int axmap_isset(struct axmap *axmap, uint64_t bit_nr);
+uint64_t axmap_first_free(struct axmap *axmap);
+uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr);
+void axmap_reset(struct axmap *axmap);
+
+#endif
--- /dev/null
+#include <stdio.h>
+
+#include "lfsr.h"
+
+/*
+ * From table 3 of
+ *
+ * http://www.xilinx.com/support/documentation/application_notes/xapp052.pdf
+ */
+static struct lfsr_taps lfsr_taps[] = {
+ {
+ .length = 16,
+ .taps = { 16, 15, 13, 4, },
+ },
+ {
+ .length = 17,
+ .taps = { 17, 14, },
+ },
+ {
+ .length = 18,
+ .taps = { 18, 11, },
+ },
+ {
+ .length = 19,
+ .taps = { 19, 6, 2, 1, },
+ },
+ {
+ .length = 20,
+ .taps = { 20, 17, },
+ },
+ {
+ .length = 21,
+ .taps = { 21, 19, },
+ },
+ {
+ .length = 22,
+ .taps = { 22, 21, },
+ },
+ {
+ .length = 23,
+ .taps = { 23, 18, },
+ },
+ {
+ .length = 24,
+ .taps = { 24, 23, 22, 17, },
+ },
+ {
+ .length = 25,
+ .taps = { 25, 22, },
+ },
+ {
+ .length = 26,
+ .taps = {26, 6, 2, 1, },
+ },
+ {
+ .length = 27,
+ .taps = { 27, 5, 2, 1, },
+ },
+ {
+ .length = 28,
+ .taps = { 28, 25, },
+ },
+ {
+ .length = 29,
+ .taps = {29, 27, },
+ },
+ {
+ .length = 30,
+ .taps = { 30, 6, 4, 1, },
+ },
+ {
+ .length = 31,
+ .taps = { 31, 28, },
+ },
+ {
+ .length = 32,
+ .taps = { 32, 22, 2, 1, },
+ },
+ {
+ .length = 33,
+ .taps = { 33, 20, },
+ },
+ {
+ .length = 34,
+ .taps = { 34, 27, 2, 1, },
+ },
+ {
+ .length = 35,
+ .taps = { 35, 33, },
+ },
+ {
+ .length = 36,
+ .taps = { 36, 25, },
+ },
+ {
+ .length = 37,
+ .taps = { 37, 5, 4, 3, 2, 1, },
+ },
+ {
+ .length = 38,
+ .taps = { 38, 6, 5, 1, },
+ },
+ {
+ .length = 39,
+ .taps = { 39, 35, },
+ },
+ {
+ .length = 40,
+ .taps = { 40, 38, 21, 19, },
+ },
+ {
+ .length = 41,
+ .taps = { 41, 38, },
+ },
+ {
+ .length = 42,
+ .taps = { 42, 41, 20, 19, },
+ },
+ {
+ .length = 43,
+ .taps = { 43, 42, 38, 37, },
+ },
+ {
+ .length = 44,
+ .taps = { 44, 43, 38, 37, },
+ },
+ {
+ .length = 45,
+ .taps = { 45, 44, 42, 41, },
+ },
+ {
+ .length = 46,
+ .taps = { 46, 45, 26, 25, },
+ },
+ {
+ .length = 47,
+ .taps = { 47, 42, },
+ },
+ {
+ .length = 48,
+ .taps = { 48, 47, 21, 20, },
+ },
+ {
+ .length = 49,
+ .taps = { 49, 40, },
+ },
+ {
+ .length = 50,
+ .taps = { 50, 49, 36, 35, },
+ },
+ {
+ .length = 51,
+ .taps = { 51, 50, 36, 35, },
+ },
+ {
+ .length = 52,
+ .taps = { 52, 49, },
+ },
+ {
+ .length = 53,
+ .taps = { 53, 52, 38, 37 },
+ },
+ {
+ .length = 54,
+ .taps = { 54, 53, 18, 17 },
+ },
+ {
+ .length = 55,
+ .taps = { 55, 31, },
+ },
+ {
+ .length = 56,
+ .taps = { 56, 55, 35, 34, },
+ },
+ {
+ .length = 57,
+ .taps = { 57, 50, },
+ },
+ {
+ .length = 58,
+ .taps = { 58, 39, },
+ },
+ {
+ .length = 59,
+ .taps = { 59, 58, 38, 37, },
+ },
+ {
+ .length = 60,
+ .taps = { 60, 59, },
+ },
+ {
+ .length = 61,
+ .taps = { 61, 60, 46, 45, },
+ },
+ {
+ .length = 62,
+ .taps = { 62, 61, 6, 5, },
+ },
+ {
+ .length = 63,
+ .taps = { 63, 62, },
+ },
+};
+
+#define FIO_LFSR_CRANKS 128
+
+static uint64_t __lfsr_next(uint64_t v, struct lfsr_taps *lt)
+{
+ uint64_t xor_mask = 0;
+ int i;
+
+ for (i = 0; lt->taps[i]; i++)
+ xor_mask ^= (v << (lt->taps[i] - 1));
+
+ xor_mask &= ~(~0UL << 1) << (lt->length - 1);
+ return xor_mask | (v >> 1);
+}
+
+int lfsr_next(struct fio_lfsr *fl, uint64_t *off)
+{
+ if (fl->num_vals > fl->max_val)
+ return 1;
+
+ do {
+ fl->last_val = __lfsr_next(fl->last_val, &fl->taps);
+ if (fl->last_val - 1 <= fl->max_val)
+ break;
+ } while (1);
+
+ *off = fl->last_val - 1;
+ fl->num_vals++;
+ return 0;
+}
+
+static struct lfsr_taps *find_lfsr(uint64_t size)
+{
+ int i;
+
+ for (i = 0; lfsr_taps[i].length; i++)
+ if (((1UL << lfsr_taps[i].length) + FIO_LFSR_CRANKS) >= size)
+ return &lfsr_taps[i];
+
+ return NULL;
+}
+
+int lfsr_init(struct fio_lfsr *fl, uint64_t size)
+{
+ struct lfsr_taps *tap;
+ int i;
+
+ tap = find_lfsr(size);
+ if (!tap)
+ return 1;
+
+ fl->last_val = 1;
+ fl->max_val = size - 1;
+ fl->num_vals = 0;
+ fl->taps.length = tap->length;
+ for (i = 0; i < FIO_MAX_TAPS; i++) {
+ fl->taps.taps[i] = tap->taps[i];
+ if (!fl->taps.taps[i])
+ break;
+ }
+
+ for (i = 0; i < FIO_LFSR_CRANKS; i++)
+ fl->last_val = __lfsr_next(fl->last_val, &fl->taps);
+
+ return 0;
+}
--- /dev/null
+#ifndef FIO_LFSR_H
+#define FIO_LFSR_H
+
+#include <inttypes.h>
+
+#define FIO_MAX_TAPS 8
+
+struct lfsr_taps {
+ unsigned int length;
+ unsigned int taps[FIO_MAX_TAPS];
+};
+
+
+struct fio_lfsr {
+ uint64_t last_val;
+ uint64_t max_val;
+ uint64_t num_vals;
+ struct lfsr_taps taps;
+};
+
+int lfsr_next(struct fio_lfsr *fl, uint64_t *off);
+int lfsr_init(struct fio_lfsr *fl, uint64_t size);
+
+#endif
.parent = "norandommap",
.def = "0",
},
+ {
+ .name = "random_generator",
+ .type = FIO_OPT_STR,
+ .off1 = td_var_offset(random_generator),
+ .help = "Type of random number generator to use",
+ .def = "tausworthe",
+ .posval = {
+ { .ival = "tausworthe",
+ .oval = FIO_RAND_GEN_TAUSWORTHE,
+ .help = "Strong Tausworthe generator",
+ },
+ { .ival = "lfsr",
+ .oval = FIO_RAND_GEN_LFSR,
+ .help = "Variable length LFSR",
+ },
+ },
+ },
{
.name = "random_distribution",
.type = FIO_OPT_STR,
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include "../lib/lfsr.h"
+
+struct axmap;
+void axmap_set(struct axmap *, uint64_t);
+struct axmap *axmap_new(uint64_t size);
+
+void *smalloc(size_t size)
+{
+ return malloc(size);
+}
+
+void sfree(void *ptr)
+{
+ free(ptr);
+}
+
+int main(int argc, char *argv[])
+{
+ struct fio_lfsr lfsr;
+ size_t size = (1UL << 28) - 200;
+ struct axmap *map;
+
+ if (argc > 1)
+ size = strtoul(argv[1], NULL, 10);
+
+ printf("Using %llu entries\n", (unsigned long long) size);
+
+ lfsr_init(&lfsr, size);
+ map = axmap_new(size);
+
+ while (size--) {
+ uint64_t val;
+
+ lfsr_next(&lfsr, &val);
+ axmap_set(map, val);
+ }
+
+ return 0;
+}