X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=lib%2Faxmap.c;h=15cd635037a0bdfc565b26c1f2502180eae831af;hb=aa6738a5d03f82424bf23cb308e4986019f14040;hp=df547f346fdca8045716161f8127d7e16ca6fbed;hpb=731ef4c7c0aad1e6717e645595f3454d264b4f2f;p=fio.git

diff --git a/lib/axmap.c b/lib/axmap.c
index df547f34..15cd6350 100644
--- a/lib/axmap.c
+++ b/lib/axmap.c
@@ -5,7 +5,7 @@
  * nothing to do with anything remotely narcissistic.
  *
  * A set bit at layer N indicates a full word at layer N-1, and so forth. As
- * the bitmap becomes progressively more full, checking for existance
+ * the bitmap becomes progressively more full, checking for existence
  * becomes cheaper (since fewer layers are walked, making it a lot more
  * cache friendly) and locating the next free space likewise.
  *
@@ -48,6 +48,7 @@ struct axmap {
 	unsigned int nr_levels;
 	struct axmap_level *levels;
 	uint64_t first_free;
+	uint64_t nr_bits;
 };
 
 static unsigned long ulog64(unsigned long val, unsigned int log)
@@ -103,6 +104,7 @@ struct axmap *axmap_new(unsigned long nr_bits)
 
 	axmap->nr_levels = levels;
 	axmap->levels = smalloc(axmap->nr_levels * sizeof(struct axmap_level));
+	axmap->nr_bits = nr_bits;
 
 	for (i = 0; i < axmap->nr_levels; i++) {
 		struct axmap_level *al = &axmap->levels[i];
@@ -187,7 +189,6 @@ void axmap_clear(struct axmap *axmap, uint64_t bit_nr)
 struct axmap_set_data {
 	unsigned int nr_bits;
 	unsigned int set_bits;
-	unsigned int fail_ok;
 };
 
 static unsigned long bit_masks[] = {
@@ -227,10 +228,8 @@ static int axmap_set_fn(struct axmap_level *al, unsigned long offset,
 	 * Mask off any potential overlap, only sets contig regions
 	 */
 	overlap = al->map[offset] & mask;
-	if (overlap == mask) {
-		assert(data->fail_ok);
+	if (overlap == mask)
 		return 1;
-	}
 
 	while (overlap) {
 		unsigned long clear_mask = ~(1UL << ffz(~overlap));
@@ -261,19 +260,24 @@ static void __axmap_set(struct axmap *axmap, uint64_t bit_nr,
 	    axmap->first_free < bit_nr + data->nr_bits)
 		axmap->first_free = -1ULL;
 
+	if (bit_nr > axmap->nr_bits)
+		return;
+	else if (bit_nr + nr_bits > axmap->nr_bits)
+		nr_bits = axmap->nr_bits - bit_nr;
+
 	set_bits = 0;
 	while (nr_bits) {
 		axmap_handler(axmap, bit_nr, axmap_set_fn, data);
 		set_bits += data->set_bits;
 
-		if (data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
+		if (!data->set_bits ||
+		    data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
 			break;
 
 		nr_bits -= data->set_bits;
 		bit_nr += data->set_bits;
 
 		data->nr_bits = nr_bits;
-		data->fail_ok = 1;
 	}
 
 	data->set_bits = set_bits;
@@ -288,10 +292,27 @@ void axmap_set(struct axmap *axmap, uint64_t bit_nr)
 unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr,
 			  unsigned int nr_bits)
 {
-	struct axmap_set_data data = { .nr_bits = nr_bits, };
+	unsigned int set_bits = 0;
 
-	__axmap_set(axmap, bit_nr, &data);
-	return data.set_bits;
+	do {
+		struct axmap_set_data data = { .nr_bits = nr_bits, };
+		unsigned int max_bits, this_set;
+
+		max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
+		if (max_bits < nr_bits)
+			data.nr_bits = max_bits;
+
+		this_set = data.nr_bits;
+		__axmap_set(axmap, bit_nr, &data);
+		set_bits += data.set_bits;
+		if (data.set_bits != this_set)
+			break;
+
+		nr_bits -= data.set_bits;
+		bit_nr += data.set_bits;
+	} while (nr_bits);
+
+	return set_bits;
 }
 
 static int axmap_isset_fn(struct axmap_level *al, unsigned long offset,
@@ -302,7 +323,10 @@ static int axmap_isset_fn(struct axmap_level *al, unsigned long offset,
 
 int axmap_isset(struct axmap *axmap, uint64_t bit_nr)
 {
-	return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
+	if (bit_nr <= axmap->nr_bits)
+		return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
+
+	return 0;
 }
 
 static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
@@ -339,7 +363,10 @@ static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
 		}
 	}
 
-	return ret;
+	if (ret < axmap->nr_bits)
+		return ret;
+
+	return (uint64_t) -1ULL;
 }
 
 uint64_t axmap_first_free(struct axmap *axmap)
@@ -361,9 +388,9 @@ static int axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
 			      unsigned int bit, void *__data)
 {
 	struct axmap_next_free_data *data = __data;
-	uint64_t mask = ~((1UL << ((data->bit & BLOCKS_PER_UNIT_MASK) + 1)) - 1);
+	uint64_t mask = ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK];
 
-	if (!(mask & al->map[offset]))
+	if (!(mask & ~al->map[offset]))
 		return 0;
 
 	if (al->map[offset] != -1UL) {
@@ -382,6 +409,7 @@ static int axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
 uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
 {
 	struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };
+	uint64_t ret;
 
 	if (firstfree_valid(axmap) && bit_nr < axmap->first_free)
 		return axmap->first_free;
@@ -391,5 +419,14 @@ uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
 
 	assert(data.level != -1U);
 
-	return axmap_find_first_free(axmap, data.level, data.offset);
+	/*
+	 * In the rare case that the map is unaligned, we might end up
+	 * finding an offset that's beyond the valid end. For that case,
+	 * find the first free one, the map is practically full.
+	 */
+	ret = axmap_find_first_free(axmap, data.level, data.offset);
+	if (ret != -1ULL)
+		return ret;
+
+	return axmap_first_free(axmap);
 }