2 * Bitmap of bitmaps, where each layer is number-of-bits-per-word smaller than
3 * the previous. Hence an 'axmap', since we axe each previous layer into a
4 * much smaller piece. I swear, that is why it's named like that. It has
5 * nothing to do with anything remotely narcissistic.
7 * A set bit at layer N indicates a full word at layer N-1, and so forth. As
8 * the bitmap becomes progressively more full, checking for existence
9 * becomes cheaper (since fewer layers are walked, making it a lot more
10 * cache friendly) and locating the next free space likewise.
12 * Axmaps get pretty close to optimal (1 bit per block) space usage, since
13 * layers quickly diminish in size. Doing the size math is straightforward,
14 * since we have log64(blocks) layers of maps. For 20000 blocks, overhead
15 * is roughly 1.9%, or 1.019 bits per block. The number quickly converges
16 * towards 1.0158, or 1.58% of overhead.
23 #include "../arch/arch.h"
25 #include "../minmax.h"
/*
 * Word-size selection. UNIT_SHIFT is presumably defined to 6 for 64-bit
 * longs and 5 for 32-bit longs in the branches below — the #define lines
 * are not visible in this view. NOTE(review): confirm against full file.
 */
27 #if BITS_PER_LONG == 64
29 #elif BITS_PER_LONG == 32
32 #error "Number of arch bits unknown"
/* Number of map bits held in one unsigned long word (1 << UNIT_SHIFT) */
35 #define BLOCKS_PER_UNIT (1U << UNIT_SHIFT)
/* Mask extracting the bit position within a single word */
36 #define BLOCKS_PER_UNIT_MASK (BLOCKS_PER_UNIT - 1)
/*
 * bit_masks[n] has the low n bits set: bit_masks[0] == 0,
 * bit_masks[1] == 0x1, ..., up to BITS_PER_LONG entries. Used to build
 * contiguous run masks (bit_masks[nr_bits] << bit) when setting ranges
 * and to mask off already-scanned low bits when searching for free bits.
 * NOTE(review): the closing brace and matching #endif for the 64-bit
 * half of the table are outside this view.
 */
38 static const unsigned long bit_masks[] = {
39 0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
40 0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
41 0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
42 0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
43 0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
44 0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
45 0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
46 0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
/* 64-bit longs get the full 64-entry table */
48 #if BITS_PER_LONG == 64
49 0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
50 0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
51 0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
52 0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
53 0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
54 0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
55 0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
56 0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
/*
 * Fragmentary struct members — the enclosing struct declarations are not
 * visible in this view. Based on usage below (axmap_reset/axmap_new):
 * map_size appears to belong to struct axmap_level (count of unsigned
 * longs in that level's map array), while nr_levels/levels belong to
 * struct axmap (levels[0] is the finest-grained bitmap, each higher
 * level is BLOCKS_PER_UNIT times smaller). NOTE(review): confirm the
 * full struct layouts (al->map, axmap->nr_bits are also referenced).
 */
62 unsigned long map_size;
67 unsigned int nr_levels;
68 struct axmap_level *levels;
/*
 * Scale 'val' down by 'log' levels. Given how it is used by
 * axmap_handler_topdown() below (mirroring bit_nr >> (UNIT_SHIFT * i)
 * in axmap_find_first_free()), this presumably returns
 * val >> (UNIT_SHIFT * log) — the body is not visible here.
 * NOTE(review): confirm against the full file.
 */
72 static inline unsigned long ulog64(unsigned long val, unsigned int log)
/*
 * Clear the map: zero every word of every level so that no bits are set.
 */
80 void axmap_reset(struct axmap *axmap)
84 for (i = 0; i < axmap->nr_levels; i++) {
85 struct axmap_level *al = &axmap->levels[i];
/* Each level's map holds map_size unsigned longs; wipe them all */
87 memset(al->map, 0, al->map_size * sizeof(unsigned long));
/*
 * Release an axmap: free each level's word array. The trailing frees of
 * the levels array and the axmap struct itself are not visible in this
 * view — NOTE(review): presumably free(axmap->levels) and free(axmap)
 * follow; confirm. A NULL guard at entry is likewise not shown.
 */
91 void axmap_free(struct axmap *axmap)
98 for (i = 0; i < axmap->nr_levels; i++)
99 free(axmap->levels[i].map);
/*
 * Allocate an axmap able to track nr_bits blocks.
 *
 * The level count is computed by repeatedly dividing the bit count by
 * BLOCKS_PER_UNIT (rounding up) until one word suffices; each level i
 * then gets a map of ceil(remaining_bits / BLOCKS_PER_UNIT) words, with
 * nr_bits shrinking by a factor of BLOCKS_PER_UNIT per level.
 *
 * NOTE(review): several lines are missing from this view — the
 * malloc/calloc failure checks, the loop that counts 'levels', and the
 * success return. The trailing loop below (freeing any non-NULL level
 * maps) is the error-unwind path for a failed per-level malloc.
 */
105 struct axmap *axmap_new(unsigned long nr_bits)
108 unsigned int i, levels;
110 axmap = malloc(sizeof(*axmap));
/* Round up to whole words at the first level... */
115 i = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
/* ...and keep shrinking until a single word covers everything */
117 i = (i + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
121 axmap->nr_levels = levels;
122 axmap->levels = calloc(axmap->nr_levels, sizeof(struct axmap_level));
123 axmap->nr_bits = nr_bits;
125 for (i = 0; i < axmap->nr_levels; i++) {
126 struct axmap_level *al = &axmap->levels[i];
/* Words needed at this level for the bits remaining */
129 al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
130 al->map = malloc(al->map_size * sizeof(unsigned long));
/* Next level tracks one bit per word of this level */
134 nr_bits = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
/* Error path: undo any per-level allocations that succeeded */
140 for (i = 0; i < axmap->nr_levels; i++)
141 if (axmap->levels[i].map)
142 free(axmap->levels[i].map);
/*
 * Bottom-up walk: apply 'func' at each level starting from level 0,
 * translating bit_nr into the (word offset, bit-within-word) pair for
 * that level. The walk stops early when 'func' returns true — per the
 * header comment, a callback returns true when its word did not become
 * full, meaning no state change needs to propagate to the next level.
 * NOTE(review): the statement executed when func() returns true, the
 * declaration of 'i', and the final return are not visible in this view.
 */
149 static bool axmap_handler(struct axmap *axmap, uint64_t bit_nr,
150 bool (*func)(struct axmap_level *, unsigned long, unsigned int,
153 struct axmap_level *al;
154 uint64_t index = bit_nr;
157 for (i = 0; i < axmap->nr_levels; i++) {
/* Split the level-local index into word offset + bit position */
158 unsigned long offset = index >> UNIT_SHIFT;
159 unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
161 al = &axmap->levels[i];
163 if (func(al, offset, bit, data))
/* One bit at level i+1 covers a whole word at level i */
167 index >>= UNIT_SHIFT;
/*
 * Top-down walk: apply 'func' from the smallest (highest) level down to
 * level 0, stopping as soon as 'func' returns true. ulog64(bit_nr, i)
 * scales the bit number to level i's index space. Used by axmap_isset():
 * a set bit high up means the whole range below is full, so the answer
 * is known without touching the lower (larger, less cache-friendly)
 * levels. NOTE(review): 'i' declaration, the true/false returns, and
 * loop closing braces are not visible in this view.
 */
173 static bool axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
174 bool (*func)(struct axmap_level *, unsigned long, unsigned int, void *))
178 for (i = axmap->nr_levels - 1; i >= 0; i--) {
179 unsigned long index = ulog64(bit_nr, i);
180 unsigned long offset = index >> UNIT_SHIFT;
181 unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
183 if (func(&axmap->levels[i], offset, bit, NULL))
/*
 * In/out parameter block for set operations:
 * nr_bits  (in)  — number of contiguous bits requested
 * set_bits (out) — number of bits actually set by this pass
 */
190 struct axmap_set_data {
191 unsigned int nr_bits;
192 unsigned int set_bits;
/*
 * axmap_handler() callback: set up to data->nr_bits contiguous bits in
 * the word al->map[offset], starting at 'bit'. Only a contiguous run is
 * set — if part of the requested range is already set, the run is
 * truncated at the first overlapping bit. data->set_bits reports how
 * many bits were actually set. Returns true when the word is still not
 * full (-1UL), which stops the upward propagation in axmap_handler();
 * returns false when the word became full so the level above must mark
 * its summary bit. NOTE(review): the early-return body for the
 * overlap == mask case and some closing braces are not visible here.
 */
195 static bool axmap_set_fn(struct axmap_level *al, unsigned long offset,
196 unsigned int bit, void *__data)
198 struct axmap_set_data *data = __data;
199 unsigned long mask, overlap;
200 unsigned int nr_bits;
/* Clamp the request to what fits in this word from 'bit' onward */
202 nr_bits = min(data->nr_bits, BLOCKS_PER_UNIT - bit);
204 mask = bit_masks[nr_bits] << bit;
207 * Mask off any potential overlap, only sets contig regions
209 overlap = al->map[offset] & mask;
210 if (overlap == mask) {
/* Partial overlap: truncate at the first already-set bit */
217 const int __bit = ffz(~overlap);
219 nr_bits = __bit - bit;
223 mask = bit_masks[nr_bits] << bit;
/* After truncation the masked range must be entirely clear */
227 assert(!(al->map[offset] & mask));
228 al->map[offset] |= mask;
231 data->set_bits = nr_bits;
/* true = word not yet full, no need to update the level above */
234 return al->map[offset] != -1UL;
/*
 * Set data->nr_bits contiguous bits starting at bit_nr, clamped to the
 * end of the map. Loops, because axmap_set_fn() may set fewer bits than
 * asked (word boundary or an already-set bit); each pass advances past
 * what was set. On return data->set_bits holds the total set.
 *
 * NOTE(review): several lines are missing from this view — the bodies
 * of the range-clamp branches, the set_bits initialization, and the
 * loop construct around the axmap_handler() call. Also note the bounds
 * check uses 'bit_nr > axmap->nr_bits'; since valid indices are
 * 0..nr_bits-1 this looks like an off-by-one ('>=' expected) — confirm
 * against upstream.
 */
237 static void __axmap_set(struct axmap *axmap, uint64_t bit_nr,
238 struct axmap_set_data *data)
240 unsigned int set_bits, nr_bits = data->nr_bits;
242 if (bit_nr > axmap->nr_bits)
244 else if (bit_nr + nr_bits > axmap->nr_bits)
245 nr_bits = axmap->nr_bits - bit_nr;
249 axmap_handler(axmap, bit_nr, axmap_set_fn, data);
250 set_bits += data->set_bits;
/* Stop when nothing was set, or the stop condition below holds */
252 if (!data->set_bits ||
253 data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
/* Advance past the bits just set and retry the remainder */
256 nr_bits -= data->set_bits;
257 bit_nr += data->set_bits;
259 data->nr_bits = nr_bits;
262 data->set_bits = set_bits;
/*
 * Set a single bit in the map (convenience wrapper around __axmap_set()
 * with a one-bit request).
 */
265 void axmap_set(struct axmap *axmap, uint64_t bit_nr)
267 struct axmap_set_data data = { .nr_bits = 1, };
269 __axmap_set(axmap, bit_nr, &data);
/*
 * Set up to nr_bits contiguous bits starting at bit_nr, issuing one
 * __axmap_set() per word-aligned chunk so each request never crosses a
 * word boundary at level 0. Stops early if a chunk could not be fully
 * set (an already-set bit or end of map was hit). Returns the number of
 * bits actually set. NOTE(review): the enclosing loop construct, the
 * final return, and some closing braces are not visible in this view.
 */
272 unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr,
273 unsigned int nr_bits)
275 unsigned int set_bits = 0;
278 struct axmap_set_data data = { .nr_bits = nr_bits, };
279 unsigned int max_bits, this_set;
/* Bits remaining in the current word at level 0 */
281 max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
282 if (nr_bits > max_bits)
283 data.nr_bits = max_bits;
285 this_set = data.nr_bits;
286 __axmap_set(axmap, bit_nr, &data);
287 set_bits += data.set_bits;
/* Chunk only partially set: cannot continue contiguously */
288 if (data.set_bits != this_set)
291 nr_bits -= data.set_bits;
292 bit_nr += data.set_bits;
/*
 * axmap_handler_topdown() callback: true if the given bit is set in the
 * word at 'offset' of this level. Returning true stops the downward
 * walk — a set summary bit means the whole range below it is full.
 */
298 static bool axmap_isset_fn(struct axmap_level *al, unsigned long offset,
299 unsigned int bit, void *unused)
301 return (al->map[offset] & (1UL << bit)) != 0;
/*
 * Test whether bit_nr is set, walking from the smallest level down and
 * stopping at the first level whose summary bit answers the question.
 * NOTE(review): the bounds check accepts bit_nr == axmap->nr_bits even
 * though valid indices are 0..nr_bits-1 — looks like an off-by-one
 * ('<' expected); confirm against upstream. The out-of-range return
 * (presumably false) is not visible in this view.
 */
304 bool axmap_isset(struct axmap *axmap, uint64_t bit_nr)
306 if (bit_nr <= axmap->nr_bits)
307 return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn);
313 * Find the first free bit that is at least as large as bit_nr. Return
314 * -1 if no free bit is found before the end of the map.
/*
 * Works top-down: at each level, find the first word with a clear bit at
 * or after the scaled-down position of bit_nr, then refine that position
 * at the next (finer) level. NOTE(review): multiple lines are missing
 * from this view — the declarations of 'i'/'bit'/'temp', the apparent
 * 'index = 0' initialization before the loop, the 'index = base_index'
 * assignment, the inner-loop break on a non-zero 'temp', the -1ULL
 * returns, and the final 'return index'. Comments below describe only
 * what the visible lines establish.
 */
316 static uint64_t axmap_find_first_free(struct axmap *axmap, uint64_t bit_nr)
321 uint64_t offset, base_index, index;
322 struct axmap_level *al;
325 for (i = axmap->nr_levels - 1; i >= 0; i--) {
326 al = &axmap->levels[i];
328 /* Shift previously calculated index for next level */
329 index <<= UNIT_SHIFT;
332 * Start from an index that's at least as large as the
333 * originally passed in bit number.
335 base_index = bit_nr >> (UNIT_SHIFT * i);
336 if (index < base_index)
339 /* Get the offset and bit for this level */
340 offset = index >> UNIT_SHIFT;
341 bit = index & BLOCKS_PER_UNIT_MASK;
344 * If the previous level had unused bits in its last
345 * word, the offset could be bigger than the map at
346 * this level. That means no free bits exist before the
347 * end of the map, so return -1.
349 if (offset >= al->map_size)
352 /* Check the first word starting with the specific bit */
353 temp = ~bit_masks[bit] & ~al->map[offset];
358 * No free bit in the first word, so iterate
359 * looking for a word with one or more free bits.
361 for (offset++; offset < al->map_size; offset++) {
362 temp = ~al->map[offset];
367 /* Did not find a free bit */
371 /* Compute the index of the free bit just found */
372 index = (offset << UNIT_SHIFT) + ffz(~temp);
375 /* If found an unused bit in the last word of level 0, return -1 */
376 if (index >= axmap->nr_bits)
383 * 'bit_nr' is already set. Find the next free bit after this one.
384 * Return -1 if no free bits found.
/*
 * Fast path: peek at the level-0 word containing bit_nr+1 for a clear
 * bit past that position. Slow path: full top-down search via
 * axmap_find_first_free(), wrapping to the start of the map if nothing
 * is free between next_bit and the end. NOTE(review): missing from this
 * view — declarations of ret/offset/bit/temp, the out-of-range and
 * wrap-to-zero return statements, the branch taken when the quick-check
 * 'temp' is zero/non-zero, and the final 'return ret'.
 */
386 uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
389 uint64_t next_bit = bit_nr + 1;
394 if (bit_nr >= axmap->nr_bits)
397 /* If at the end of the map, wrap-around */
398 if (next_bit == axmap->nr_bits)
401 offset = next_bit >> UNIT_SHIFT;
402 bit = next_bit & BLOCKS_PER_UNIT_MASK;
405 * As an optimization, do a quick check for a free bit
406 * in the current word at level 0. If not found, do
/* Clear bits at/after 'bit' in the current level-0 word */
409 temp = ~bit_masks[bit] & ~axmap->levels[0].map[offset];
411 ret = (offset << UNIT_SHIFT) + ffz(~temp);
413 /* Might have found an unused bit at level 0 */
414 if (ret >= axmap->nr_bits)
417 ret = axmap_find_first_free(axmap, next_bit);
420 * If there are no free bits starting at next_bit and going
421 * to the end of the map, wrap around by searching again
424 if (ret == -1ULL && next_bit != 0)
425 ret = axmap_find_first_free(axmap, 0);