* nothing to do with anything remotely narcissistic.
*
* A set bit at layer N indicates a full word at layer N-1, and so forth. As
- * the bitmap becomes progressively more full, checking for existance
+ * the bitmap becomes progressively more full, checking for existence
* becomes cheaper (since fewer layers are walked, making it a lot more
* cache friendly) and locating the next free space likewise.
*
#include "../arch/arch.h"
#include "axmap.h"
-#include "../smalloc.h"
#include "../minmax.h"
#if BITS_PER_LONG == 64
#error "Number of arch bits unknown"
#endif
-#define BLOCKS_PER_UNIT (1UL << UNIT_SHIFT)
+#define BLOCKS_PER_UNIT (1U << UNIT_SHIFT)
#define BLOCKS_PER_UNIT_MASK (BLOCKS_PER_UNIT - 1)
#define firstfree_valid(b) ((b)->first_free != (uint64_t) -1)
unsigned int nr_levels;
struct axmap_level *levels;
uint64_t first_free;
+ uint64_t nr_bits;
};
static unsigned long ulog64(unsigned long val, unsigned int log)
memset(al->map, 0, al->map_size * sizeof(unsigned long));
}
+
+ axmap->first_free = 0;
}
void axmap_free(struct axmap *axmap)
return;
for (i = 0; i < axmap->nr_levels; i++)
- sfree(axmap->levels[i].map);
+ free(axmap->levels[i].map);
- sfree(axmap->levels);
- sfree(axmap);
+ free(axmap->levels);
+ free(axmap);
}
struct axmap *axmap_new(unsigned long nr_bits)
struct axmap *axmap;
unsigned int i, levels;
- axmap = smalloc(sizeof(*axmap));
+ axmap = malloc(sizeof(*axmap));
if (!axmap)
return NULL;
}
axmap->nr_levels = levels;
- axmap->levels = smalloc(axmap->nr_levels * sizeof(struct axmap_level));
- axmap->first_free = 0;
+ axmap->levels = malloc(axmap->nr_levels * sizeof(struct axmap_level));
+ axmap->nr_bits = nr_bits;
for (i = 0; i < axmap->nr_levels; i++) {
struct axmap_level *al = &axmap->levels[i];
al->level = i;
al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
- al->map = smalloc(al->map_size * sizeof(unsigned long));
+ al->map = malloc(al->map_size * sizeof(unsigned long));
if (!al->map)
goto err;
err:
for (i = 0; i < axmap->nr_levels; i++)
if (axmap->levels[i].map)
- sfree(axmap->levels[i].map);
+ free(axmap->levels[i].map);
- sfree(axmap->levels);
+ free(axmap->levels);
+ free(axmap);
return NULL;
}
-static int axmap_handler(struct axmap *axmap, uint64_t bit_nr,
- int (*func)(struct axmap_level *, unsigned long, unsigned int,
+static bool axmap_handler(struct axmap *axmap, uint64_t bit_nr,
+ bool (*func)(struct axmap_level *, unsigned long, unsigned int,
void *), void *data)
{
struct axmap_level *al;
al = &axmap->levels[i];
if (func(al, offset, bit, data))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-static int axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
- int (*func)(struct axmap_level *, unsigned long, unsigned int, void *),
+static bool axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
+ bool (*func)(struct axmap_level *, unsigned long, unsigned int, void *),
void *data)
{
struct axmap_level *al;
al = &axmap->levels[i];
if (func(al, offset, bit, data))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-static int axmap_clear_fn(struct axmap_level *al, unsigned long offset,
+static bool axmap_clear_fn(struct axmap_level *al, unsigned long offset,
unsigned int bit, void *unused)
{
if (!(al->map[offset] & (1UL << bit)))
- return 1;
+ return true;
al->map[offset] &= ~(1UL << bit);
- return 0;
+ return false;
}
void axmap_clear(struct axmap *axmap, uint64_t bit_nr)
struct axmap_set_data {
unsigned int nr_bits;
unsigned int set_bits;
- unsigned int fail_ok;
};
static unsigned long bit_masks[] = {
#endif
};
-static int axmap_set_fn(struct axmap_level *al, unsigned long offset,
+static bool axmap_set_fn(struct axmap_level *al, unsigned long offset,
unsigned int bit, void *__data)
{
struct axmap_set_data *data = __data;
* Mask off any potential overlap, only sets contig regions
*/
overlap = al->map[offset] & mask;
- if (overlap == mask) {
- assert(data->fail_ok);
- return 1;
- }
+ if (overlap == mask)
+ return true;
while (overlap) {
unsigned long clear_mask = ~(1UL << ffz(~overlap));
axmap->first_free < bit_nr + data->nr_bits)
axmap->first_free = -1ULL;
+ if (bit_nr > axmap->nr_bits)
+ return;
+ else if (bit_nr + nr_bits > axmap->nr_bits)
+ nr_bits = axmap->nr_bits - bit_nr;
+
set_bits = 0;
while (nr_bits) {
axmap_handler(axmap, bit_nr, axmap_set_fn, data);
set_bits += data->set_bits;
- if (data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
+ if (!data->set_bits ||
+ data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
break;
nr_bits -= data->set_bits;
bit_nr += data->set_bits;
data->nr_bits = nr_bits;
- data->fail_ok = 1;
}
data->set_bits = set_bits;
__axmap_set(axmap, bit_nr, &data);
}
-unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr, unsigned int nr_bits)
+unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr,
+ unsigned int nr_bits)
{
- struct axmap_set_data data = { .nr_bits = nr_bits, };
+ unsigned int set_bits = 0;
- __axmap_set(axmap, bit_nr, &data);
- return data.set_bits;
+ do {
+ struct axmap_set_data data = { .nr_bits = nr_bits, };
+ unsigned int max_bits, this_set;
+
+ max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
+ if (max_bits < nr_bits)
+ data.nr_bits = max_bits;
+
+ this_set = data.nr_bits;
+ __axmap_set(axmap, bit_nr, &data);
+ set_bits += data.set_bits;
+ if (data.set_bits != this_set)
+ break;
+
+ nr_bits -= data.set_bits;
+ bit_nr += data.set_bits;
+ } while (nr_bits);
+
+ return set_bits;
}
-static int axmap_isset_fn(struct axmap_level *al, unsigned long offset,
- unsigned int bit, void *unused)
+static bool axmap_isset_fn(struct axmap_level *al, unsigned long offset,
+ unsigned int bit, void *unused)
{
return (al->map[offset] & (1UL << bit)) != 0;
}
-int axmap_isset(struct axmap *axmap, uint64_t bit_nr)
+bool axmap_isset(struct axmap *axmap, uint64_t bit_nr)
{
- return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
+	if (bit_nr < axmap->nr_bits)
+ return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
+
+ return false;
}
static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
uint64_t index)
{
+ uint64_t ret = -1ULL;
unsigned long j;
int i;
for (i = level; i >= 0; i--) {
struct axmap_level *al = &axmap->levels[i];
+	/*
+	 * Index is beyond this level's map, which should not happen;
+	 * report "no free bit found" by keeping 'ret' at -1ULL.
+	 */
if (index >= al->map_size) {
- index = -1ULL;
+ ret = -1ULL;
break;
}
* First free bit here is our index into the first
* free bit at the next higher level
*/
- index = (j << UNIT_SHIFT) + ffz(al->map[j]);
+ ret = index = (j << UNIT_SHIFT) + ffz(al->map[j]);
break;
}
}
- return index;
+ if (ret < axmap->nr_bits)
+ return ret;
+
+ return (uint64_t) -1ULL;
}
-uint64_t axmap_first_free(struct axmap *axmap)
+static uint64_t axmap_first_free(struct axmap *axmap)
{
if (firstfree_valid(axmap))
return axmap->first_free;
uint64_t bit;
};
-static int axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
+static bool axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
unsigned int bit, void *__data)
{
struct axmap_next_free_data *data = __data;
- uint64_t mask = ~((1UL << ((data->bit & BLOCKS_PER_UNIT_MASK) + 1)) - 1);
+ uint64_t mask = ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK];
- if (!(mask & al->map[offset]))
- return 0;
+ if (!(mask & ~al->map[offset]))
+ return false;
if (al->map[offset] != -1UL) {
data->level = al->level;
data->offset = offset;
- return 1;
+ return true;
}
data->bit = (data->bit + BLOCKS_PER_UNIT - 1) / BLOCKS_PER_UNIT;
- return 0;
+ return false;
}
/*
uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
{
struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };
+ uint64_t ret;
if (firstfree_valid(axmap) && bit_nr < axmap->first_free)
return axmap->first_free;
assert(data.level != -1U);
- return axmap_find_first_free(axmap, data.level, data.offset);
+	/*
+	 * In the rare case that the map is unaligned, we might end up
+	 * finding an offset that's beyond the valid end. In that case,
+	 * fall back to the first free bit in the whole map, since the
+	 * map is practically full at that point.
+	 */
+ ret = axmap_find_first_free(axmap, data.level, data.offset);
+ if (ret != -1ULL)
+ return ret;
+
+ return axmap_first_free(axmap);
}