2 * Bitmap of bitmaps, where each layer is number-of-bits-per-word smaller than
3 * the previous. Hence an 'axmap', since we axe each previous layer into a
4 * much smaller piece. I swear, that is why it's named like that. It has
5 * nothing to do with anything remotely narcissistic.
7 * A set bit at layer N indicates a full word at layer N-1, and so forth. As
8 * the bitmap becomes progressively more full, checking for existence
9 * becomes cheaper (since fewer layers are walked, making it a lot more
10 * cache friendly) and locating the next free space likewise.
12 * Axmaps get pretty close to optimal (1 bit per block) space usage, since
 * layers quickly diminish in size. Doing the size math is straightforward,
14 * since we have log64(blocks) layers of maps. For 20000 blocks, overhead
15 * is roughly 1.9%, or 1.019 bits per block. The number quickly converges
16 * towards 1.0158, or 1.58% of overhead.
23 #include "../arch/arch.h"
25 #include "../minmax.h"
27 #if BITS_PER_LONG == 64
29 #elif BITS_PER_LONG == 32
32 #error "Number of arch bits unknown"
35 #define BLOCKS_PER_UNIT (1U << UNIT_SHIFT)
36 #define BLOCKS_PER_UNIT_MASK (BLOCKS_PER_UNIT - 1)
/*
 * bit_masks[n] has the low n bits set: a fill mask for a partial word.
 * Index 0 is the empty mask; the table is extended below so it can be
 * indexed up to BITS_PER_LONG on 64-bit longs.
 */
static const unsigned long bit_masks[] = {
	0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
	0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
	0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
	0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
	0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
	0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
	0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
	0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
#if BITS_PER_LONG == 64
	0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
	0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
	0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
	0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
	0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
	0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
	0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
	0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
 * struct axmap_level - a bitmap used to implement struct axmap
 * @level: Level index. Each map has at least one level with index zero. The
 * higher the level index, the fewer bits a struct axmap_level contains.
 * @map_size: Number of elements of the @map array.
 * @map: A bitmap with @map_size elements.
	unsigned long map_size;	/* number of unsigned longs in @map */
 * struct axmap - a set that can store numbers 0 .. @nr_bits - 1
 * @nr_levels: Number of elements of the @levels array.
 * @levels: struct axmap_level array in which lower levels contain more bits
 * than higher levels.
 * @nr_bits: One more than the highest value stored in the set.
	unsigned int nr_levels;
	struct axmap_level *levels;
/*
 * Map a level-0 value @val to its index at level @log, i.e. divide by
 * BLOCKS_PER_UNIT @log times.
 * NOTE(review): body not visible in this chunk -- presumably a shift loop
 * (val >>= UNIT_SHIFT, @log times); confirm against the implementation.
 */
static inline unsigned long ulog64(unsigned long val, unsigned int log)
94 /* Remove all elements from the @axmap set */
95 void axmap_reset(struct axmap *axmap)
99 for (i = 0; i < axmap->nr_levels; i++) {
100 struct axmap_level *al = &axmap->levels[i];
102 memset(al->map, 0, al->map_size * sizeof(unsigned long));
106 void axmap_free(struct axmap *axmap)
113 for (i = 0; i < axmap->nr_levels; i++)
114 free(axmap->levels[i].map);
/* Allocate memory for a set that can store the numbers 0 .. @nr_bits - 1. */
struct axmap *axmap_new(unsigned long nr_bits)
	unsigned int i, levels;

	axmap = malloc(sizeof(*axmap));
	/*
	 * NOTE(review): no allocation-failure checks are visible in this
	 * chunk for malloc()/calloc() below; confirm the elided lines bail
	 * out (goto the cleanup path) on NULL.
	 */

	/*
	 * Count the levels needed: each level holds one bit per word of the
	 * level below, so the bit count shrinks by BLOCKS_PER_UNIT per level.
	 */
	i = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
		i = (i + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;

	axmap->nr_levels = levels;
	axmap->levels = calloc(axmap->nr_levels, sizeof(struct axmap_level));
	axmap->nr_bits = nr_bits;

	for (i = 0; i < axmap->nr_levels; i++) {
		struct axmap_level *al = &axmap->levels[i];

		/* words needed to hold the bits remaining at this level */
		al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
		al->map = malloc(al->map_size * sizeof(unsigned long));

		/* the next level covers the words of this one */
		nr_bits = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;

	/* Error path: free whichever level maps were already allocated */
	for (i = 0; i < axmap->nr_levels; i++)
		if (axmap->levels[i].map)
			free(axmap->levels[i].map);
166 * Call @func for each level, starting at level zero, until a level is found
167 * for which @func returns true. Return false if none of the @func calls
170 static bool axmap_handler(struct axmap *axmap, uint64_t bit_nr,
171 bool (*func)(struct axmap_level *, unsigned long, unsigned int,
174 struct axmap_level *al;
175 uint64_t index = bit_nr;
178 for (i = 0; i < axmap->nr_levels; i++) {
179 unsigned long offset = index >> UNIT_SHIFT;
180 unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
182 al = &axmap->levels[i];
184 if (func(al, offset, bit, data))
188 index >>= UNIT_SHIFT;
195 * Call @func for each level, starting at the highest level, until a level is
196 * found for which @func returns true. Return false if none of the @func calls
199 static bool axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
200 bool (*func)(struct axmap_level *, unsigned long, unsigned int, void *))
204 for (i = axmap->nr_levels - 1; i >= 0; i--) {
205 unsigned long index = ulog64(bit_nr, i);
206 unsigned long offset = index >> UNIT_SHIFT;
207 unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
209 if (func(&axmap->levels[i], offset, bit, NULL))
/* In/out bookkeeping for one __axmap_set() request. */
struct axmap_set_data {
	unsigned int nr_bits;	/* in: number of bits requested to be set */
	unsigned int set_bits;	/* out: number of bits actually set */
/*
 * Set at most @__data->nr_bits bits in @al at offset @offset. Do not exceed
 * the boundary of the element at offset @offset. Return the number of bits
 * that have been set in @__data->set_bits if @al->level == 0.
 */
static bool axmap_set_fn(struct axmap_level *al, unsigned long offset,
	unsigned int bit, void *__data)
	struct axmap_set_data *data = __data;
	unsigned long mask, overlap;
	unsigned int nr_bits;

	/* Never cross the word boundary: clamp to the bits left in this word */
	nr_bits = min(data->nr_bits, BLOCKS_PER_UNIT - bit);

	mask = bit_masks[nr_bits] << bit;

	/*
	 * Mask off any potential overlap, we only set contiguous regions
	 */
	overlap = al->map[offset] & mask;
	if (overlap == mask) {
		/* fully occupied already -- NOTE(review): early-out elided here */

		/* Trim the request at the first already-set bit */
		const int __bit = ffz(~overlap);

		nr_bits = __bit - bit;

		mask = bit_masks[nr_bits] << bit;

	/* The chosen region must be free at this point */
	assert(!(al->map[offset] & mask));
	al->map[offset] |= mask;

	/* only level 0 reports how many bits were set (guard elided above) */
		data->set_bits = nr_bits;

	/* For the next level */

	/* propagate upward only while whole words become full */
	return al->map[offset] != -1UL;
/*
 * Set up to @data->nr_bits starting from @bit_nr in @axmap. Start at
 * @bit_nr. If that bit has not yet been set then set it and continue until
 * either @data->nr_bits have been set or a 1 bit is found. Store the number
 * of bits that have been set in @data->set_bits. It is guaranteed that all
 * bits that have been requested to set fit in the same unsigned long word of
 * level 0 of the map.
 */
static void __axmap_set(struct axmap *axmap, uint64_t bit_nr,
	struct axmap_set_data *data)
	/* NOTE(review): set_bits is not visibly initialized to 0 in this
	 * chunk -- confirm the elided lines do so before the loop below. */
	unsigned int set_bits, nr_bits = data->nr_bits;

	/* Clamp the request so it stays within 0 .. nr_bits - 1 */
	if (bit_nr > axmap->nr_bits)
	else if (bit_nr + nr_bits > axmap->nr_bits)
		nr_bits = axmap->nr_bits - bit_nr;

		axmap_handler(axmap, bit_nr, axmap_set_fn, data);
		set_bits += data->set_bits;

		/* stop once nothing was set, or the word was not filled */
		if (!data->set_bits ||
		    data->set_bits != (BLOCKS_PER_UNIT - nr_bits))

		/* advance past the bits just set and retry the remainder */
		nr_bits -= data->set_bits;
		bit_nr += data->set_bits;

		data->nr_bits = nr_bits;

	data->set_bits = set_bits;
306 void axmap_set(struct axmap *axmap, uint64_t bit_nr)
308 struct axmap_set_data data = { .nr_bits = 1, };
310 __axmap_set(axmap, bit_nr, &data);
/*
 * Set up to @nr_bits starting from @bit in @axmap. Start at @bit. If that
 * bit has not yet been set then set it and continue until either @nr_bits
 * have been set or a 1 bit is found. Return the number of bits that have been
 * set.
 */
unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr,
	unsigned int nr_bits)
	unsigned int set_bits = 0;

		struct axmap_set_data data = { .nr_bits = nr_bits, };
		unsigned int max_bits, this_set;

		/* __axmap_set() may not cross a level-0 word boundary */
		max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
		if (nr_bits > max_bits)
			data.nr_bits = max_bits;

		this_set = data.nr_bits;
		__axmap_set(axmap, bit_nr, &data);
		set_bits += data.set_bits;
		/* stop early if a bit inside the chunk was already set */
		if (data.set_bits != this_set)

		nr_bits -= data.set_bits;
		bit_nr += data.set_bits;
345 static bool axmap_isset_fn(struct axmap_level *al, unsigned long offset,
346 unsigned int bit, void *unused)
348 return (al->map[offset] & (1UL << bit)) != 0;
351 bool axmap_isset(struct axmap *axmap, uint64_t bit_nr)
353 if (bit_nr <= axmap->nr_bits)
354 return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn);
360 * Find the first free bit that is at least as large as bit_nr. Return
361 * -1 if no free bit is found before the end of the map.
363 static uint64_t axmap_find_first_free(struct axmap *axmap, uint64_t bit_nr)
368 uint64_t offset, base_index, index;
369 struct axmap_level *al;
372 for (i = axmap->nr_levels - 1; i >= 0; i--) {
373 al = &axmap->levels[i];
375 /* Shift previously calculated index for next level */
376 index <<= UNIT_SHIFT;
379 * Start from an index that's at least as large as the
380 * originally passed in bit number.
382 base_index = bit_nr >> (UNIT_SHIFT * i);
383 if (index < base_index)
386 /* Get the offset and bit for this level */
387 offset = index >> UNIT_SHIFT;
388 bit = index & BLOCKS_PER_UNIT_MASK;
391 * If the previous level had unused bits in its last
392 * word, the offset could be bigger than the map at
393 * this level. That means no free bits exist before the
394 * end of the map, so return -1.
396 if (offset >= al->map_size)
399 /* Check the first word starting with the specific bit */
400 temp = ~bit_masks[bit] & ~al->map[offset];
405 * No free bit in the first word, so iterate
406 * looking for a word with one or more free bits.
408 for (offset++; offset < al->map_size; offset++) {
409 temp = ~al->map[offset];
414 /* Did not find a free bit */
418 /* Compute the index of the free bit just found */
419 index = (offset << UNIT_SHIFT) + ffz(~temp);
422 /* If found an unused bit in the last word of level 0, return -1 */
423 if (index >= axmap->nr_bits)
/*
 * 'bit_nr' is already set. Find the next free bit after this one.
 * Return -1 if no free bits found.
 */
uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
	uint64_t next_bit = bit_nr + 1;

	/* Out-of-range starting point: nothing to do */
	if (bit_nr >= axmap->nr_bits)

	/* If at the end of the map, wrap-around */
	if (next_bit == axmap->nr_bits)

	offset = next_bit >> UNIT_SHIFT;
	bit = next_bit & BLOCKS_PER_UNIT_MASK;

	/*
	 * As an optimization, do a quick check for a free bit
	 * in the current word at level 0. If not found, do
	 * a full search starting from next_bit.
	 */
	temp = ~bit_masks[bit] & ~axmap->levels[0].map[offset];
		ret = (offset << UNIT_SHIFT) + ffz(~temp);

	/* Might have found an unused padding bit at level 0 */
	if (ret >= axmap->nr_bits)

	ret = axmap_find_first_free(axmap, next_bit);

	/*
	 * If there are no free bits starting at next_bit and going
	 * to the end of the map, wrap around by searching again
	 * starting at bit 0.
	 */
	if (ret == -1ULL && next_bit != 0)
		ret = axmap_find_first_free(axmap, 0);