/*
 * Bitmap of bitmaps, where each layer is number-of-bits-per-word smaller than
 * the previous. Hence an 'axmap', since we axe each previous layer into a
 * much smaller piece. I swear, that is why it's named like that. It has
 * nothing to do with anything remotely narcissistic.
 *
 * A set bit at layer N indicates a full word at layer N-1, and so forth. As
 * the bitmap becomes progressively more full, checking for existence
 * becomes cheaper (since fewer layers are walked, making it a lot more
 * cache friendly) and locating the next free space likewise.
 *
 * Axmaps get pretty close to optimal (1 bit per block) space usage, since
 * layers quickly diminish in size. Doing the size math is straightforward,
 * since we have log64(blocks) layers of maps. For 20000 blocks, overhead
 * is roughly 1.9%, or 1.019 bits per block. The number quickly converges
 * towards 1.0158, or 1.58% of overhead.
 */
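/*
 * Worked example of the size math above (a sketch, assuming 64-bit words,
 * i.e. BLOCKS_PER_UNIT == 64): for 20000 blocks, level 0 needs
 * ceil(20000 / 64) = 313 words, level 1 needs ceil(313 / 64) = 5 words to
 * summarize those, and level 2 needs a single word. That is 319 words of
 * backing store for 20000 tracked blocks, each extra level being roughly
 * 64 times smaller than the one below it.
 */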
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include <stdbool.h>

#include "../arch/arch.h"
#include "axmap.h"
#include "../minmax.h"

#if BITS_PER_LONG == 64
#define UNIT_SHIFT		6
#elif BITS_PER_LONG == 32
#define UNIT_SHIFT		5
#else
#error "Number of arch bits unknown"
#endif

#define BLOCKS_PER_UNIT		(1U << UNIT_SHIFT)
#define BLOCKS_PER_UNIT_MASK	(BLOCKS_PER_UNIT - 1)

#define firstfree_valid(b)	((b)->first_free != (uint64_t) -1)
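/*
 * bit_masks[n] has the low n bits set: bit_masks[0] is the empty mask and
 * bit_masks[BITS_PER_LONG] covers a full word.
 */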
static const unsigned long bit_masks[] = {
	0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
	0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
	0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
	0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
	0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
	0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
	0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
	0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
	0x00000000ffffffff,
#if BITS_PER_LONG == 64
	0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
	0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
	0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
	0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
	0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
	0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
	0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
	0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
#endif
};
struct axmap_level {
	int level;
	unsigned long map_size;
	unsigned long *map;
};

struct axmap {
	unsigned int nr_levels;
	struct axmap_level *levels;
	uint64_t first_free;
	uint64_t nr_bits;
};
/*
 * Divide 'val' by BLOCKS_PER_UNIT 'log' times, scaling a bit number down
 * to the word index it occupies at a given level.
 */
static inline unsigned long ulog64(unsigned long val, unsigned int log)
{
	while (log-- && val)
		val >>= UNIT_SHIFT;

	return val;
}
void axmap_reset(struct axmap *axmap)
{
	unsigned int i;

	for (i = 0; i < axmap->nr_levels; i++) {
		struct axmap_level *al = &axmap->levels[i];

		memset(al->map, 0, al->map_size * sizeof(unsigned long));
	}

	axmap->first_free = 0;
}
void axmap_free(struct axmap *axmap)
{
	unsigned int i;

	if (!axmap)
		return;

	if (axmap->levels) {
		for (i = 0; i < axmap->nr_levels; i++)
			free(axmap->levels[i].map);

		free(axmap->levels);
	}

	free(axmap);
}
struct axmap *axmap_new(unsigned long nr_bits)
{
	struct axmap *axmap;
	unsigned int i, levels;

	axmap = malloc(sizeof(*axmap));
	if (!axmap)
		return NULL;

	levels = 1;
	i = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
	while (i > 1) {
		i = (i + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
		levels++;
	}

	axmap->nr_levels = levels;
	axmap->levels = calloc(axmap->nr_levels, sizeof(struct axmap_level));
	axmap->nr_bits = nr_bits;

	for (i = 0; i < axmap->nr_levels; i++) {
		struct axmap_level *al = &axmap->levels[i];

		al->level = i;
		al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
		al->map = malloc(al->map_size * sizeof(unsigned long));
		if (!al->map)
			goto err;

		nr_bits = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
	}

	axmap_reset(axmap);
	return axmap;
err:
	for (i = 0; i < axmap->nr_levels; i++)
		if (axmap->levels[i].map)
			free(axmap->levels[i].map);

	free(axmap->levels);
	free(axmap);
	return NULL;
}
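/*
 * Walk the levels bottom up (level 0 first), handing 'func' the word
 * offset and bit index of 'bit_nr' at each level. The walk stops early as
 * soon as 'func' returns true.
 */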
static bool axmap_handler(struct axmap *axmap, uint64_t bit_nr,
			  bool (*func)(struct axmap_level *, unsigned long, unsigned int,
			  void *), void *data)
{
	struct axmap_level *al;
	uint64_t index = bit_nr;
	unsigned int i;

	for (i = 0; i < axmap->nr_levels; i++) {
		unsigned long offset = index >> UNIT_SHIFT;
		unsigned int bit = index & BLOCKS_PER_UNIT_MASK;

		al = &axmap->levels[i];

		if (func(al, offset, bit, data))
			return true;

		index >>= UNIT_SHIFT;
	}

	return false;
}
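/*
 * Like axmap_handler(), but walk the levels top down (smallest map first)
 * and pass no private data to 'func'.
 */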
static bool axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
	bool (*func)(struct axmap_level *, unsigned long, unsigned int, void *))
{
	int i;

	for (i = axmap->nr_levels - 1; i >= 0; i--) {
		unsigned long index = ulog64(bit_nr, i);
		unsigned long offset = index >> UNIT_SHIFT;
		unsigned int bit = index & BLOCKS_PER_UNIT_MASK;

		if (func(&axmap->levels[i], offset, bit, NULL))
			return true;
	}

	return false;
}
static bool axmap_clear_fn(struct axmap_level *al, unsigned long offset,
			   unsigned int bit, void *unused)
{
	if (!(al->map[offset] & (1UL << bit)))
		return true;

	al->map[offset] &= ~(1UL << bit);
	return false;
}
void axmap_clear(struct axmap *axmap, uint64_t bit_nr)
{
	axmap_handler(axmap, bit_nr, axmap_clear_fn, NULL);

	if (bit_nr < axmap->first_free)
		axmap->first_free = bit_nr;
}
struct axmap_set_data {
	unsigned int nr_bits;
	unsigned int set_bits;
};
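/*
 * Set up to data->nr_bits contiguous bits in a single word, starting at
 * 'bit'. If part of that range is already set, only the leading free run
 * is taken: for example (a sketch, assuming 64-bit words), asking for 8
 * bits at bit 4 while bit 9 is already set trims nr_bits to
 * ffz(~overlap) - bit = 9 - 4 = 5, so only bits 4..8 are set. Returns
 * true when the walk up the levels can stop, i.e. when this word did not
 * become completely full.
 */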
static bool axmap_set_fn(struct axmap_level *al, unsigned long offset,
			 unsigned int bit, void *__data)
{
	struct axmap_set_data *data = __data;
	unsigned long mask, overlap;
	unsigned int nr_bits;

	nr_bits = min(data->nr_bits, BLOCKS_PER_UNIT - bit);

	mask = bit_masks[nr_bits] << bit;

	/*
	 * Mask off any potential overlap, we only set contiguous regions
	 */
	overlap = al->map[offset] & mask;
	if (overlap == mask) {
		data->set_bits = 0;
		return true;
	}

	if (overlap) {
		const int __bit = ffz(~overlap);

		nr_bits = __bit - bit;
		if (!nr_bits) {
			data->set_bits = 0;
			return true;
		}

		mask = bit_masks[nr_bits] << bit;
	}

	assert(mask);
	assert(!(al->map[offset] & mask));
	al->map[offset] |= mask;

	/*
	 * Only level 0 reports how many bits were newly set; the levels
	 * above just track full words, one summary bit at a time.
	 */
	if (!al->level)
		data->set_bits = nr_bits;

	data->nr_bits = 1;
	return al->map[offset] != -1UL;
}
static void __axmap_set(struct axmap *axmap, uint64_t bit_nr,
			struct axmap_set_data *data)
{
	unsigned int set_bits, nr_bits = data->nr_bits;

	if (axmap->first_free >= bit_nr &&
	    axmap->first_free < bit_nr + data->nr_bits)
		axmap->first_free = -1ULL;

	if (bit_nr > axmap->nr_bits)
		return;
	else if (bit_nr + nr_bits > axmap->nr_bits)
		nr_bits = axmap->nr_bits - bit_nr;

	set_bits = 0;
	while (nr_bits) {
		axmap_handler(axmap, bit_nr, axmap_set_fn, data);
		set_bits += data->set_bits;

		if (!data->set_bits ||
		    data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
			break;

		nr_bits -= data->set_bits;
		bit_nr += data->set_bits;

		data->nr_bits = nr_bits;
	}

	data->set_bits = set_bits;
}
void axmap_set(struct axmap *axmap, uint64_t bit_nr)
{
	struct axmap_set_data data = { .nr_bits = 1, };

	__axmap_set(axmap, bit_nr, &data);
}
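/*
 * Set 'nr_bits' bits starting at 'bit_nr', crossing word boundaries as
 * needed. Returns the number of bits actually set, which may be less than
 * 'nr_bits' if an already-set bit or the end of the map is hit.
 */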
unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr,
			  unsigned int nr_bits)
{
	unsigned int set_bits = 0;

	do {
		struct axmap_set_data data = { .nr_bits = nr_bits, };
		unsigned int max_bits, this_set;

		max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
		if (nr_bits > max_bits)
			data.nr_bits = max_bits;

		this_set = data.nr_bits;
		__axmap_set(axmap, bit_nr, &data);
		set_bits += data.set_bits;
		if (data.set_bits != this_set)
			break;

		nr_bits -= data.set_bits;
		bit_nr += data.set_bits;
	} while (nr_bits);

	return set_bits;
}
static bool axmap_isset_fn(struct axmap_level *al, unsigned long offset,
			   unsigned int bit, void *unused)
{
	return (al->map[offset] & (1UL << bit)) != 0;
}
bool axmap_isset(struct axmap *axmap, uint64_t bit_nr)
{
	if (bit_nr <= axmap->nr_bits)
		return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn);

	return false;
}
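/*
 * Find the first free bit, starting the scan at word 'index' of the given
 * level. Each level's first not-full word tells us which word to scan at
 * the level below; at level 0 the free bit number itself is returned, or
 * (uint64_t) -1 if no free bit is found.
 */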
static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
				      uint64_t index)
{
	uint64_t ret = -1ULL;
	unsigned long j;
	int i;

	/*
	 * Start at the given level (the topmost, smallest map when called
	 * from axmap_first_free()), then converge on the first free bit
	 * at level 0
	 */
	for (i = level; i >= 0; i--) {
		struct axmap_level *al = &axmap->levels[i];

		if (index >= al->map_size)
			break;

		for (j = index; j < al->map_size; j++) {
			if (al->map[j] == -1UL)
				continue;

			/*
			 * First free bit here is our index into the first
			 * free bit at the next lower level
			 */
			ret = index = (j << UNIT_SHIFT) + ffz(al->map[j]);
			break;
		}
	}

	if (ret < axmap->nr_bits)
		return ret;

	return (uint64_t) -1ULL;
}
static uint64_t axmap_first_free(struct axmap *axmap)
{
	if (!firstfree_valid(axmap))
		axmap->first_free = axmap_find_first_free(axmap, axmap->nr_levels - 1, 0);

	return axmap->first_free;
}
struct axmap_next_free_data {
	unsigned int level;
	unsigned long offset;
	uint64_t bit;
};
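/*
 * Per-level helper for axmap_next_free(): if this level's word still has
 * a free bit past data->bit, record the level and word offset so the
 * caller can descend from there with axmap_find_first_free(); otherwise
 * keep walking up to the next, coarser level.
 */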
static bool axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
			       unsigned int bit, void *__data)
{
	struct axmap_next_free_data *data = __data;
	uint64_t mask = ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK];

	if (!(mask & ~al->map[offset]))
		return false;

	if (al->map[offset] != -1UL) {
		data->level = al->level;
		data->offset = offset;
		return true;
	}

	data->bit = (data->bit + BLOCKS_PER_UNIT - 1) / BLOCKS_PER_UNIT;
	return false;
}
/*
 * 'bit_nr' is already set. Find the next free bit after this one.
 */
uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
{
	struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };
	uint64_t ret;

	if (firstfree_valid(axmap) && bit_nr < axmap->first_free)
		return axmap->first_free;

	if (!axmap_handler(axmap, bit_nr, axmap_next_free_fn, &data))
		return axmap_first_free(axmap);

	assert(data.level != -1U);

	/*
	 * In the rare case that the map is unaligned, we might end up
	 * finding an offset that's beyond the valid end. For that case,
	 * fall back to finding the first free bit; the map is practically
	 * full at that point anyway.
	 */
	ret = axmap_find_first_free(axmap, data.level, data.offset);
	if (ret != -1ULL)
		return ret;

	return axmap_first_free(axmap);
}
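/*
 * Minimal usage sketch of the API above (hypothetical caller, error
 * handling omitted):
 *
 *	struct axmap *map = axmap_new(20000);
 *	uint64_t next;
 *
 *	axmap_set(map, 100);
 *	assert(axmap_isset(map, 100));
 *	next = axmap_next_free(map, 100);	(some free bit other than 100)
 *	axmap_clear(map, 100);
 *	axmap_free(map);
 */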