diff --git a/lib/axmap.c b/lib/axmap.c
index 533a155c..15cd6350 100644
--- a/lib/axmap.c
+++ b/lib/axmap.c
@@ -5,7 +5,7 @@
  * nothing to do with anything remotely narcissistic.
  *
  * A set bit at layer N indicates a full word at layer N-1, and so forth. As
- * the bitmap becomes progressively more full, checking for existance
+ * the bitmap becomes progressively more full, checking for existence
  * becomes cheaper (since fewer layers are walked, making it a lot more
  * cache friendly) and locating the next free space likewise.
  *
@@ -189,7 +189,6 @@ void axmap_clear(struct axmap *axmap, uint64_t bit_nr)
 struct axmap_set_data {
 	unsigned int nr_bits;
 	unsigned int set_bits;
-	unsigned int fail_ok;
 };
 
 static unsigned long bit_masks[] = {
@@ -229,10 +228,8 @@ static int axmap_set_fn(struct axmap_level *al, unsigned long offset,
 	 * Mask off any potential overlap, only sets contig regions
 	 */
 	overlap = al->map[offset] & mask;
-	if (overlap == mask) {
-		assert(data->fail_ok);
+	if (overlap == mask)
 		return 1;
-	}
 
 	while (overlap) {
 		unsigned long clear_mask = ~(1UL << ffz(~overlap));
@@ -273,14 +270,14 @@
 		axmap_handler(axmap, bit_nr, axmap_set_fn, data);
 		set_bits += data->set_bits;
 
-		if (data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
+		if (!data->set_bits ||
+		    data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
 			break;
 
 		nr_bits -= data->set_bits;
 		bit_nr += data->set_bits;
 
 		data->nr_bits = nr_bits;
-		data->fail_ok = 1;
 	}
 
 	data->set_bits = set_bits;
@@ -295,10 +292,27 @@ void axmap_set(struct axmap *axmap, uint64_t bit_nr)
 unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr,
 			  unsigned int nr_bits)
 {
-	struct axmap_set_data data = { .nr_bits = nr_bits, };
+	unsigned int set_bits = 0;
 
-	__axmap_set(axmap, bit_nr, &data);
-	return data.set_bits;
+	do {
+		struct axmap_set_data data = { .nr_bits = nr_bits, };
+		unsigned int max_bits, this_set;
+
+		max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
+		if (max_bits < nr_bits)
+			data.nr_bits = max_bits;
+
+		this_set = data.nr_bits;
+		__axmap_set(axmap, bit_nr, &data);
+		set_bits += data.set_bits;
+		if (data.set_bits != this_set)
+			break;
+
+		nr_bits -= data.set_bits;
+		bit_nr += data.set_bits;
+	} while (nr_bits);
+
+	return set_bits;
 }
 
 static int axmap_isset_fn(struct axmap_level *al, unsigned long offset,
@@ -374,9 +388,9 @@ static int axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
 			      unsigned int bit, void *__data)
 {
 	struct axmap_next_free_data *data = __data;
-	uint64_t mask = ~((1UL << ((data->bit & BLOCKS_PER_UNIT_MASK) + 1)) - 1);
+	uint64_t mask = ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK];
 
-	if (!(mask & al->map[offset]))
+	if (!(mask & ~al->map[offset]))
 		return 0;
 
 	if (al->map[offset] != -1UL) {
@@ -395,6 +409,7 @@ static int axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
 uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
 {
 	struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };
+	uint64_t ret;
 
 	if (firstfree_valid(axmap) && bit_nr < axmap->first_free)
 		return axmap->first_free;
@@ -404,5 +419,14 @@ uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
 
 	assert(data.level != -1U);
 
-	return axmap_find_first_free(axmap, data.level, data.offset);
+	/*
+	 * In the rare case that the map is unaligned, we might end up
+	 * finding an offset that's beyond the valid end. For that case,
+	 * find the first free one, the map is practically full.
+	 */
+	ret = axmap_find_first_free(axmap, data.level, data.offset);
+	if (ret != -1ULL)
+		return ret;
+
+	return axmap_first_free(axmap);
 }
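
A note on the layered structure the file's header comment describes: a set bit at level N means "the covered 64-bit word at level N-1 is entirely full", so an existence check can walk top-down and stop at the first level where the covering bit is set. The sketch below is a toy model of that walk, not fio's API; toy_axmap/toy_isset and the fixed 64-way fan-out per level are assumptions for illustration (fio's own walk is axmap_handler_topdown() over struct axmap_level, with a word-size-dependent UNIT_SHIFT):

#include <assert.h>
#include <stdint.h>
#include <stdbool.h>

struct toy_level {
	uint64_t *map;		/* bit i here covers word i of the level below */
};

struct toy_axmap {
	unsigned int nr_levels;
	struct toy_level *levels;	/* levels[0] is the actual bitmap */
};

static bool toy_isset(const struct toy_axmap *axmap, uint64_t bit_nr)
{
	int lvl;

	for (lvl = axmap->nr_levels - 1; lvl >= 0; lvl--) {
		/* position of bit_nr's covering bit at this level */
		uint64_t index = bit_nr >> (6 * lvl);	/* 6 == log2(64) */
		uint64_t word = axmap->levels[lvl].map[index >> 6];

		if (word & (1UL << (index & 63)))
			return true;	/* set here => fully set below */
	}
	return false;	/* clear all the way down to level 0 */
}

int main(void)
{
	/* 128-bit map: level 0 has two words, level 1 one word */
	uint64_t l0[2] = { ~0UL, 0 };	/* bits 0..63 set, 64..127 clear */
	uint64_t l1[1] = { 1UL };	/* bit 0 set: level-0 word 0 is full */
	struct toy_level levels[2] = { { l0 }, { l1 } };
	struct toy_axmap axmap = { 2, levels };

	assert(toy_isset(&axmap, 17));	/* answered at level 1: word 0 full */
	assert(!toy_isset(&axmap, 100));
	return 0;
}

The fuller the map, the higher the level at which the walk terminates, which is why the header comment says existence checks get cheaper (and more cache friendly) as the map fills up.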
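
The rewritten axmap_set_nr() is worth a closer look. axmap_set_fn() only sets contiguous runs within one 64-bit unit (via bit_masks[]), so a multi-bit request is now clipped to the current unit with max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK), the remainder retried from the next unit, and the loop stops early if a chunk comes back short (an already-set bit was hit). Here is a standalone walk-through of just that splitting loop, with a stub standing in for __axmap_set() (toy code, not fio's):

#include <stdio.h>
#include <stdint.h>

#define BLOCKS_PER_UNIT		64UL
#define BLOCKS_PER_UNIT_MASK	(BLOCKS_PER_UNIT - 1)

/* stands in for __axmap_set(); pretend every requested bit was newly set */
static unsigned int stub_set(uint64_t bit_nr, unsigned int nr_bits)
{
	printf("unit %llu: set bits %llu..%llu\n",
	       (unsigned long long)(bit_nr / BLOCKS_PER_UNIT),
	       (unsigned long long)bit_nr,
	       (unsigned long long)(bit_nr + nr_bits - 1));
	return nr_bits;
}

int main(void)
{
	uint64_t bit_nr = 60;
	unsigned int nr_bits = 10, set_bits = 0;

	do {
		/* clip the request to the end of the current 64-bit unit */
		unsigned int max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
		unsigned int this_set = max_bits < nr_bits ? max_bits : nr_bits;

		if (stub_set(bit_nr, this_set) != this_set)
			break;	/* short set: ran into an already-set bit */

		set_bits += this_set;
		nr_bits -= this_set;
		bit_nr += this_set;
	} while (nr_bits);

	printf("%u bits set\n", set_bits);
	return 0;
}

With bit_nr = 60 and nr_bits = 10, the first pass is clipped to 4 bits (60..63) and the second pass sets 64..69, mirroring how the patched loop crosses a unit boundary.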
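
The axmap_next_free_fn() change fixes two things at once. bit_masks[n] is the low-n-bits mask, so ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK] selects the positions strictly above the bit without the old expression's shift, which is undefined behaviour (1UL << 64) once the bit sits at position 63; and since a free block is a *cleared* bit, the candidate test has to be mask & ~al->map[offset], not mask & al->map[offset]. A self-contained check of both points; low_mask() here is a stand-in for the bit_masks[] table, not fio code:

#include <assert.h>
#include <stdint.h>

/* low-n-bits mask, the role bit_masks[] plays in lib/axmap.c */
static uint64_t low_mask(unsigned int n)
{
	return n ? (~0UL >> (64 - n)) : 0;
}

int main(void)
{
	uint64_t word = ~0UL & ~(1UL << 40);	/* full except bit 40 */
	unsigned int bit;

	/* table lookup matches the old shift for every bit the shift can express */
	for (bit = 0; bit < 63; bit++)
		assert(~low_mask(bit + 1) == ~((1UL << (bit + 1)) - 1));

	/* a free-bit probe must look at cleared bits, hence the ~word */
	assert(~low_mask(10 + 1) & ~word);	/* bit 40 is free above bit 10 */
	assert(!(~low_mask(50 + 1) & ~word));	/* nothing free above bit 50 */
	return 0;
}

For a bit at position 63 the old expression shifted by 64; the new table index simply wraps to bit_masks[0], avoiding the undefined shift entirely.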