/*
 * Bitmap of bitmaps, where each layer is number-of-bits-per-word smaller than
 * the previous. Hence an 'axmap', since we axe each previous layer into a
 * much smaller piece. I swear, that is why it's named like that. It has
 * nothing to do with anything remotely narcissistic.
 *
 * A set bit at layer N indicates a full word at layer N-1, and so forth. As
 * the bitmap becomes progressively more full, checking for existence
 * becomes cheaper (since fewer layers are walked, making it a lot more
 * cache friendly) and so does locating the next free space.
 *
 * Axmaps get pretty close to optimal (1 bit per block) space usage, since
 * layers quickly diminish in size. Doing the size math is straightforward,
 * since we have log64(blocks) layers of maps. For 20000 blocks, overhead
 * is roughly 1.9%, or 1.019 bits per block. The number quickly converges
 * towards 1.0158 bits per block, or 1.58% overhead.
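 *
 * As a rough check of that math (assuming 64-bit words): the overhead per
 * data bit is the geometric series 1/64 + 1/64^2 + ... = 1/63, so total
 * space tends towards 64/63, i.e. ~1.0159 bits per block. For 20000 blocks
 * that works out to three levels of roughly 313, 5 and 1 words.
 *
 * Typical usage, as a minimal illustrative sketch:
 *
 *	struct axmap *map = axmap_new(nr_blocks);
 *
 *	axmap_set(map, 42);
 *	if (!axmap_isset(map, 43))
 *		next = axmap_next_free(map, 42);
 *
 *	axmap_free(map);
 */
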
#include <stdint.h>
#include <string.h>
#include <assert.h>

#include "../arch/arch.h"
#include "../smalloc.h"
#include "../minmax.h"
#if BITS_PER_LONG == 64
#define UNIT_SHIFT	6
#elif BITS_PER_LONG == 32
#define UNIT_SHIFT	5
#else
#error "Number of arch bits unknown"
#endif
#define BLOCKS_PER_UNIT		(1U << UNIT_SHIFT)
#define BLOCKS_PER_UNIT_MASK	(BLOCKS_PER_UNIT - 1)

#define firstfree_valid(b)	((b)->first_free != (uint64_t) -1)
	unsigned long map_size;

	struct fio_mutex lock;
	unsigned int nr_levels;
	struct axmap_level *levels;
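
/*
 * Index of bit 'val' as seen from 'log' levels further up: shift right by
 * UNIT_SHIFT once per level.
 */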
static unsigned long ulog64(unsigned long val, unsigned int log)
void axmap_reset(struct axmap *axmap)
	fio_mutex_down(&axmap->lock);

	for (i = 0; i < axmap->nr_levels; i++) {
		struct axmap_level *al = &axmap->levels[i];

		memset(al->map, 0, al->map_size * sizeof(unsigned long));

	axmap->first_free = 0;
	fio_mutex_up(&axmap->lock);
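
/*
 * Free the per-level maps and the embedded lock.
 */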
void axmap_free(struct axmap *axmap)
	for (i = 0; i < axmap->nr_levels; i++)
		sfree(axmap->levels[i].map);

	__fio_mutex_remove(&axmap->lock);
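
/*
 * Allocate an axmap covering 'nr_bits' bits. The number of levels is found
 * by repeatedly dividing the bit count by BLOCKS_PER_UNIT until one word is
 * left; each level then gets a word array sized the same way.
 */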
struct axmap *axmap_new(unsigned long nr_bits)
	struct axmap *axmap;
	unsigned int i, levels;

	axmap = smalloc(sizeof(*axmap));
	__fio_mutex_init(&axmap->lock, FIO_MUTEX_UNLOCKED);

	levels = 1;
	i = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
	while (i > 1) {
		i = (i + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
		levels++;
	}

	axmap->nr_levels = levels;
	axmap->levels = smalloc(axmap->nr_levels * sizeof(struct axmap_level));
	axmap->nr_bits = nr_bits;

	for (i = 0; i < axmap->nr_levels; i++) {
		struct axmap_level *al = &axmap->levels[i];

		al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
		al->map = smalloc(al->map_size * sizeof(unsigned long));

		nr_bits = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
	}

	return axmap;

err:
	for (i = 0; i < axmap->nr_levels; i++)
		if (axmap->levels[i].map)
			sfree(axmap->levels[i].map);

	sfree(axmap->levels);
	__fio_mutex_remove(&axmap->lock);
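
/*
 * Walk the levels for 'bit_nr' from the lowest level upwards, handing 'func'
 * the word offset and bit index at each level. A nonzero return from 'func'
 * ends the walk; axmap_handler_topdown() below does the same walk from the
 * highest level down.
 */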
static int axmap_handler(struct axmap *axmap, uint64_t bit_nr,
			 int (*func)(struct axmap_level *, unsigned long, unsigned int,
			 void *), void *data)
	struct axmap_level *al;
	int i;

	for (i = 0; i < axmap->nr_levels; i++) {
		unsigned long index = ulog64(bit_nr, i);
		unsigned long offset = index >> UNIT_SHIFT;
		unsigned int bit = index & BLOCKS_PER_UNIT_MASK;

		al = &axmap->levels[i];

		if (func(al, offset, bit, data))
			return 1;
	}

	return 0;
static int axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
	int (*func)(struct axmap_level *, unsigned long, unsigned int, void *),
	void *data)
	struct axmap_level *al;
	int i, level = axmap->nr_levels;

	for (i = axmap->nr_levels - 1; i >= 0; i--) {
		unsigned long index = ulog64(bit_nr, --level);
		unsigned long offset = index >> UNIT_SHIFT;
		unsigned int bit = index & BLOCKS_PER_UNIT_MASK;

		al = &axmap->levels[i];

		if (func(al, offset, bit, data))
			return 1;
	}

	return 0;
static int axmap_clear_fn(struct axmap_level *al, unsigned long offset,
			  unsigned int bit, void *unused)
	if (!(al->map[offset] & (1UL << bit)))
		return 1;

	al->map[offset] &= ~(1UL << bit);
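
/*
 * Clear bit 'bit_nr'. Any upper-level bits covering it are cleared as well,
 * since their word below is no longer full.
 */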
void axmap_clear(struct axmap *axmap, uint64_t bit_nr)
	axmap_handler(axmap, bit_nr, axmap_clear_fn, NULL);
struct axmap_set_data {
	unsigned int nr_bits;
	unsigned int set_bits;
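
/*
 * bit_masks[n] has the low n bits set, for 0 <= n <= BITS_PER_LONG.
 */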
static unsigned long bit_masks[] = {
	0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
	0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
	0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
	0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
	0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
	0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
	0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
	0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
	0x00000000ffffffff,
#if BITS_PER_LONG == 64
	0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
	0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
	0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
	0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
	0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
	0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
	0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
	0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
#endif
};
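
/*
 * Set up to data->nr_bits bits in this word, starting at 'bit'. Bits that
 * are already set are masked off and not counted; data->set_bits reports
 * how many were actually set. The walk only continues to the next level if
 * this word ends up completely full.
 */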
static int axmap_set_fn(struct axmap_level *al, unsigned long offset,
			unsigned int bit, void *__data)
	struct axmap_set_data *data = __data;
	unsigned long mask, overlap;
	unsigned int nr_bits;

	nr_bits = min(data->nr_bits, BLOCKS_PER_UNIT - bit);

	mask = bit_masks[nr_bits] << bit;
	/*
	 * Mask off any potential overlap; we only set contiguous regions.
	 */
	overlap = al->map[offset] & mask;

		unsigned long clear_mask = ~(1UL << ffz(~overlap));

		overlap &= clear_mask;

	assert(!(al->map[offset] & mask));

	al->map[offset] |= mask;

	data->set_bits = nr_bits;

	return al->map[offset] != -1UL;
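
/*
 * Core set helper: invalidate the first-free hint if the range covers it,
 * clamp the range to the map size, then apply axmap_set_fn level by level.
 * data->set_bits returns the total number of bits set.
 */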
static void __axmap_set(struct axmap *axmap, uint64_t bit_nr,
			struct axmap_set_data *data)
	unsigned int set_bits, nr_bits = data->nr_bits;

	if (axmap->first_free >= bit_nr &&
	    axmap->first_free < bit_nr + data->nr_bits)
		axmap->first_free = -1ULL;

	if (bit_nr > axmap->nr_bits)
		return;
	else if (bit_nr + nr_bits > axmap->nr_bits)
		nr_bits = axmap->nr_bits - bit_nr;

	set_bits = 0;
	while (nr_bits) {
		axmap_handler(axmap, bit_nr, axmap_set_fn, data);
		set_bits += data->set_bits;

		if (!data->set_bits ||
		    data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
			break;

		nr_bits -= data->set_bits;
		bit_nr += data->set_bits;

		data->nr_bits = nr_bits;
	}

	data->set_bits = set_bits;
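
/*
 * Set a single bit, under the lock.
 */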
void axmap_set(struct axmap *axmap, uint64_t bit_nr)
	struct axmap_set_data data = { .nr_bits = 1, };

	fio_mutex_down(&axmap->lock);
	__axmap_set(axmap, bit_nr, &data);
	fio_mutex_up(&axmap->lock);
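
/*
 * Set 'nr_bits' bits starting at 'bit_nr', in chunks that never cross a
 * word boundary at the lowest level. Stops early if a chunk could not be
 * fully set and returns the number of bits that were.
 */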
unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr, unsigned int nr_bits)
	unsigned int set_bits = 0;

	do {
		struct axmap_set_data data = { .nr_bits = nr_bits, };
		unsigned int max_bits, this_set;

		max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
		if (max_bits < nr_bits)
			data.nr_bits = max_bits;

		this_set = data.nr_bits;
		__axmap_set(axmap, bit_nr, &data);
		set_bits += data.set_bits;
		if (data.set_bits != this_set)
			break;

		nr_bits -= data.set_bits;
		bit_nr += data.set_bits;
	} while (nr_bits);

	return set_bits;
static int axmap_isset_fn(struct axmap_level *al, unsigned long offset,
			  unsigned int bit, void *unused)
	return (al->map[offset] & (1UL << bit)) != 0;
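
/*
 * Check whether 'bit_nr' is set. The walk is top down, so a set bit at an
 * upper level (meaning the whole word below it is full) answers right away.
 */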
int axmap_isset(struct axmap *axmap, uint64_t bit_nr)
	if (bit_nr < axmap->nr_bits) {
		fio_mutex_down(&axmap->lock);
		ret = axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
		fio_mutex_up(&axmap->lock);
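
/*
 * Find the first free bit at or after word 'index' of the given level,
 * drilling down one level at a time. Returns -1ULL if no free bit is found
 * in range.
 */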
static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
				      uint64_t index)
	uint64_t ret = -1ULL;
	/*
	 * Start at the given level and work down the levels, converging on
	 * the first free bit at level 0.
	 */
	for (i = level; i >= 0; i--) {
		struct axmap_level *al = &axmap->levels[i];
		/*
		 * Clear 'ret', this is a bug condition.
		 */
		if (index >= al->map_size) {
			ret = -1ULL;
			break;
		}
		for (j = index; j < al->map_size; j++) {
			if (al->map[j] == -1UL)
				continue;
			/*
			 * The first free bit here gives the word index to
			 * continue the search from at the next level down.
			 */
			ret = index = (j << UNIT_SHIFT) + ffz(al->map[j]);
			break;

	if (ret < axmap->nr_bits)
		return ret;

	return (uint64_t) -1ULL;
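
/*
 * Return the cached first-free bit if the hint is still valid, otherwise
 * recompute and cache it under the lock.
 */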
uint64_t axmap_first_free(struct axmap *axmap)
	if (firstfree_valid(axmap))
		return axmap->first_free;

	fio_mutex_down(&axmap->lock);
	ret = axmap_find_first_free(axmap, axmap->nr_levels - 1, 0);
	axmap->first_free = ret;
	fio_mutex_up(&axmap->lock);
struct axmap_next_free_data {
	unsigned long offset;
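
/*
 * Per-level helper for the next-free search: ignore bits at or below the
 * current position and check whether this word has a free bit above it. If
 * so, the level and word offset are recorded for the caller to resume the
 * search from.
 */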
static int axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
			      unsigned int bit, void *__data)
	struct axmap_next_free_data *data = __data;
	uint64_t mask = ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK];

	if (!(mask & ~al->map[offset]))
		return 0;

	if (al->map[offset] != -1UL) {
		data->level = al->level;
		data->offset = offset;
		return 1;
	}

	data->bit = (data->bit + BLOCKS_PER_UNIT - 1) / BLOCKS_PER_UNIT;
/*
 * 'bit_nr' is already set. Find the next free bit after this one.
 */
uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
	struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };

	fio_mutex_down(&axmap->lock);

	if (firstfree_valid(axmap) && bit_nr < axmap->first_free) {
		ret = axmap->first_free;
	if (!axmap_handler(axmap, bit_nr, axmap_next_free_fn, &data)) {
		/*
		 * Nothing free past 'bit_nr' on this walk; fall back to the
		 * overall first free bit. The lock is already held here, so
		 * search directly rather than calling axmap_first_free()
		 * and taking the lock again.
		 */
		ret = axmap_find_first_free(axmap, axmap->nr_levels - 1, 0);
	assert(data.level != -1U);

	/*
	 * In the rare case that the map is unaligned, we might end up
	 * finding an offset that's beyond the valid end. For that case,
	 * find the first free one, the map is practically full.
	 */
	ret = axmap_find_first_free(axmap, data.level, data.offset);
	if (ret != -1ULL) {
		fio_mutex_up(&axmap->lock);
		return ret;
	}

	ret = axmap_find_first_free(axmap, axmap->nr_levels - 1, 0);