unsigned int nr_levels;
struct axmap_level *levels;
uint64_t first_free;
+ uint64_t nr_bits;
};
static unsigned long ulog64(unsigned long val, unsigned int log)
axmap->nr_levels = levels;
axmap->levels = smalloc(axmap->nr_levels * sizeof(struct axmap_level));
+ axmap->nr_bits = nr_bits;
for (i = 0; i < axmap->nr_levels; i++) {
struct axmap_level *al = &axmap->levels[i];
/*
 * Bookkeeping for a set operation, passed through the per-level handlers:
 * 'nr_bits' is how many contiguous bits the caller wants set starting at
 * the target bit, 'set_bits' reports back how many were newly set.
 * (The old 'fail_ok' member is gone: partial sets are now a normal
 * result, reported via 'set_bits', not an assertable failure.)
 */
struct axmap_set_data {
	unsigned int nr_bits;
	unsigned int set_bits;
};
static unsigned long bit_masks[] = {
* Mask off any potential overlap, only sets contig regions
*/
overlap = al->map[offset] & mask;
- if (overlap == mask) {
- assert(data->fail_ok);
+ if (overlap == mask)
return 1;
- }
while (overlap) {
unsigned long clear_mask = ~(1UL << ffz(~overlap));
axmap->first_free < bit_nr + data->nr_bits)
axmap->first_free = -1ULL;
+ if (bit_nr > axmap->nr_bits)
+ return;
+ else if (bit_nr + nr_bits > axmap->nr_bits)
+ nr_bits = axmap->nr_bits - bit_nr;
+
set_bits = 0;
while (nr_bits) {
axmap_handler(axmap, bit_nr, axmap_set_fn, data);
set_bits += data->set_bits;
- if (data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
+ if (!data->set_bits ||
+ data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
break;
nr_bits -= data->set_bits;
bit_nr += data->set_bits;
data->nr_bits = nr_bits;
- data->fail_ok = 1;
}
data->set_bits = set_bits;
unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr, unsigned int nr_bits)
{
- struct axmap_set_data data = { .nr_bits = nr_bits, };
+ unsigned int set_bits = 0;
- __axmap_set(axmap, bit_nr, &data);
- return data.set_bits;
+ do {
+ struct axmap_set_data data = { .nr_bits = nr_bits, };
+ unsigned int max_bits, this_set;
+
+ max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
+ if (max_bits < nr_bits)
+ data.nr_bits = max_bits;
+
+ this_set = data.nr_bits;
+ __axmap_set(axmap, bit_nr, &data);
+ set_bits += data.set_bits;
+ if (data.set_bits != this_set)
+ break;
+
+ nr_bits -= data.set_bits;
+ bit_nr += data.set_bits;
+ } while (nr_bits);
+
+ return set_bits;
}
static int axmap_isset_fn(struct axmap_level *al, unsigned long offset,
int axmap_isset(struct axmap *axmap, uint64_t bit_nr)
{
- return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
+ if (bit_nr <= axmap->nr_bits)
+ return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
+
+ return 0;
}
static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
uint64_t index)
{
+ uint64_t ret = -1ULL;
unsigned long j;
int i;
for (i = level; i >= 0; i--) {
struct axmap_level *al = &axmap->levels[i];
+ /*
+ * Clear 'ret', this is a bug condition.
+ */
if (index >= al->map_size) {
- index = -1ULL;
+ ret = -1ULL;
break;
}
* First free bit here is our index into the first
* free bit at the next higher level
*/
- index = (j << UNIT_SHIFT) + ffz(al->map[j]);
+ ret = index = (j << UNIT_SHIFT) + ffz(al->map[j]);
break;
}
}
- return index;
+ if (ret < axmap->nr_bits)
+ return ret;
+
+ return (uint64_t) -1ULL;
}
uint64_t axmap_first_free(struct axmap *axmap)
unsigned int bit, void *__data)
{
struct axmap_next_free_data *data = __data;
- uint64_t mask = ~((1UL << ((data->bit & BLOCKS_PER_UNIT_MASK) + 1)) - 1);
+ uint64_t mask = ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK];
- if (!(mask & al->map[offset]))
+ if (!(mask & ~al->map[offset]))
return 0;
if (al->map[offset] != -1UL) {
uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
{
struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };
+ uint64_t ret;
if (firstfree_valid(axmap) && bit_nr < axmap->first_free)
return axmap->first_free;
assert(data.level != -1U);
- return axmap_find_first_free(axmap, data.level, data.offset);
+ /*
+ * In the rare case that the map is unaligned, we might end up
+ * finding an offset that's beyond the valid end. For that case,
+ * find the first free one, the map is practically full.
+ */
+ ret = axmap_find_first_free(axmap, data.level, data.offset);
+ if (ret != -1ULL)
+ return ret;
+
+ return axmap_first_free(axmap);
}