axmap: clean up 'no bits to set' case
[fio.git] / lib / axmap.c
... / ...
CommitLineData
/*
 * Bitmap of bitmaps, where each layer is number-of-bits-per-word smaller than
 * the previous. Hence an 'axmap', since we axe each previous layer into a
 * much smaller piece. I swear, that is why it's named like that. It has
 * nothing to do with anything remotely narcissistic.
 *
 * A set bit at layer N indicates a full word at layer N-1, and so forth. As
 * the bitmap becomes progressively more full, checking for existence
 * becomes cheaper (since fewer layers are walked, making it a lot more
 * cache friendly) and locating the next free space likewise.
 *
 * Axmaps get pretty close to optimal (1 bit per block) space usage, since
 * layers quickly diminish in size. Doing the size math is straightforward,
 * since we have log64(blocks) layers of maps. For 20000 blocks, overhead
 * is roughly 1.9%, or 1.019 bits per block. The number quickly converges
 * towards 1.0158, or 1.58% of overhead.
 */
18#include <stdio.h>
19#include <stdlib.h>
20#include <string.h>
21#include <assert.h>
22
23#include "../arch/arch.h"
24#include "axmap.h"
25#include "../minmax.h"
26
27#if BITS_PER_LONG == 64
28#define UNIT_SHIFT 6
29#elif BITS_PER_LONG == 32
30#define UNIT_SHIFT 5
31#else
32#error "Number of arch bits unknown"
33#endif
34
35#define BLOCKS_PER_UNIT (1U << UNIT_SHIFT)
36#define BLOCKS_PER_UNIT_MASK (BLOCKS_PER_UNIT - 1)
37
38#define firstfree_valid(b) ((b)->first_free != (uint64_t) -1)
39
/*
 * One layer of the map. Level 0 is the full-resolution bitmap; each higher
 * level holds one summary bit per word of the level below it.
 */
struct axmap_level {
	int level;		/* depth of this layer, 0 == bottom */
	unsigned long map_size;	/* number of words in 'map' */
	unsigned long *map;	/* the bit words themselves */
};
45
/*
 * The map proper: a stack of progressively smaller levels, plus a cached
 * hint for the lowest clear bit.
 */
struct axmap {
	unsigned int nr_levels;		/* number of entries in 'levels' */
	struct axmap_level *levels;	/* levels[0] is the bottom layer */
	uint64_t first_free;		/* cached lowest clear bit, -1ULL when unknown */
	uint64_t nr_bits;		/* total bits tracked at level 0 */
};
52
53static unsigned long ulog64(unsigned long val, unsigned int log)
54{
55 while (log-- && val)
56 val >>= UNIT_SHIFT;
57
58 return val;
59}
60
61void axmap_reset(struct axmap *axmap)
62{
63 int i;
64
65 for (i = 0; i < axmap->nr_levels; i++) {
66 struct axmap_level *al = &axmap->levels[i];
67
68 memset(al->map, 0, al->map_size * sizeof(unsigned long));
69 }
70
71 axmap->first_free = 0;
72}
73
74void axmap_free(struct axmap *axmap)
75{
76 unsigned int i;
77
78 if (!axmap)
79 return;
80
81 for (i = 0; i < axmap->nr_levels; i++)
82 free(axmap->levels[i].map);
83
84 free(axmap->levels);
85 free(axmap);
86}
87
88struct axmap *axmap_new(unsigned long nr_bits)
89{
90 struct axmap *axmap;
91 unsigned int i, levels;
92
93 axmap = malloc(sizeof(*axmap));
94 if (!axmap)
95 return NULL;
96
97 levels = 1;
98 i = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
99 while (i > 1) {
100 i = (i + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
101 levels++;
102 }
103
104 axmap->nr_levels = levels;
105 axmap->levels = calloc(axmap->nr_levels, sizeof(struct axmap_level));
106 axmap->nr_bits = nr_bits;
107
108 for (i = 0; i < axmap->nr_levels; i++) {
109 struct axmap_level *al = &axmap->levels[i];
110
111 al->level = i;
112 al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
113 al->map = malloc(al->map_size * sizeof(unsigned long));
114 if (!al->map)
115 goto err;
116
117 nr_bits = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
118 }
119
120 axmap_reset(axmap);
121 return axmap;
122err:
123 for (i = 0; i < axmap->nr_levels; i++)
124 if (axmap->levels[i].map)
125 free(axmap->levels[i].map);
126
127 free(axmap->levels);
128 free(axmap);
129 return NULL;
130}
131
132static bool axmap_handler(struct axmap *axmap, uint64_t bit_nr,
133 bool (*func)(struct axmap_level *, unsigned long, unsigned int,
134 void *), void *data)
135{
136 struct axmap_level *al;
137 int i;
138
139 for (i = 0; i < axmap->nr_levels; i++) {
140 unsigned long index = ulog64(bit_nr, i);
141 unsigned long offset = index >> UNIT_SHIFT;
142 unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
143
144 al = &axmap->levels[i];
145
146 if (func(al, offset, bit, data))
147 return true;
148 }
149
150 return false;
151}
152
153static bool axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
154 bool (*func)(struct axmap_level *, unsigned long, unsigned int, void *),
155 void *data)
156{
157 struct axmap_level *al;
158 int i, level = axmap->nr_levels;
159
160 for (i = axmap->nr_levels - 1; i >= 0; i--) {
161 unsigned long index = ulog64(bit_nr, --level);
162 unsigned long offset = index >> UNIT_SHIFT;
163 unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
164
165 al = &axmap->levels[i];
166
167 if (func(al, offset, bit, data))
168 return true;
169 }
170
171 return false;
172}
173
174static bool axmap_clear_fn(struct axmap_level *al, unsigned long offset,
175 unsigned int bit, void *unused)
176{
177 if (!(al->map[offset] & (1UL << bit)))
178 return true;
179
180 al->map[offset] &= ~(1UL << bit);
181 return false;
182}
183
184void axmap_clear(struct axmap *axmap, uint64_t bit_nr)
185{
186 axmap_handler(axmap, bit_nr, axmap_clear_fn, NULL);
187
188 if (bit_nr < axmap->first_free)
189 axmap->first_free = bit_nr;
190}
191
/*
 * Argument bundle for axmap_set_fn() / __axmap_set().
 */
struct axmap_set_data {
	unsigned int nr_bits;	/* in: contiguous bits requested to be set */
	unsigned int set_bits;	/* out: bits actually newly set at level 0 */
};
196
/*
 * bit_masks[i] has the low 'i' bits set: bit_masks[i] == (1UL << i) - 1,
 * with the final entry (index BITS_PER_LONG) being all ones. Indexed by
 * run length when building contiguous set/clear masks.
 */
static const unsigned long bit_masks[] = {
	0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
	0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
	0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
	0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
	0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
	0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
	0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
	0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
	0x00000000ffffffff,
#if BITS_PER_LONG == 64
	0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
	0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
	0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
	0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
	0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
	0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
	0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
	0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
#endif
};
218
/*
 * Per-level worker for setting bits: set up to data->nr_bits contiguous
 * bits starting at 'bit' in this level's word, truncating at the first
 * bit that is already set. Returns true to stop the upward walk; the
 * walk only continues when this word became completely full.
 */
static bool axmap_set_fn(struct axmap_level *al, unsigned long offset,
			 unsigned int bit, void *__data)
{
	struct axmap_set_data *data = __data;
	unsigned long mask, overlap;
	unsigned int nr_bits;

	/* Clamp the run so it does not spill past the end of this word */
	nr_bits = min(data->nr_bits, BLOCKS_PER_UNIT - bit);

	mask = bit_masks[nr_bits] << bit;

	/*
	 * Mask off any potential overlap, only sets contig regions
	 */
	overlap = al->map[offset] & mask;
	if (overlap == mask)
		return true;

	if (overlap) {
		/* ffz(~overlap) is the lowest already-set bit in the run */
		const int __bit = ffz(~overlap);

		nr_bits = __bit - bit;
		if (!nr_bits)
			return true;

		mask = bit_masks[nr_bits] << bit;
	}

	assert(mask);
	assert(!(al->map[offset] & mask));
	al->map[offset] |= mask;

	/* Only level 0 reports how many real bits were set */
	if (!al->level)
		data->set_bits = nr_bits;

	/* Levels above carry a single summary bit per full word below */
	data->nr_bits = 1;
	return al->map[offset] != -1UL;
}
257
/*
 * Set data->nr_bits contiguous bits starting at 'bit_nr', propagating
 * newly-full words up the levels. On return data->set_bits holds how
 * many level-0 bits were actually newly set.
 */
static void __axmap_set(struct axmap *axmap, uint64_t bit_nr,
			 struct axmap_set_data *data)
{
	unsigned int set_bits, nr_bits = data->nr_bits;

	/*
	 * Invalidate the cached first-free hint if it falls inside the
	 * range being set; axmap_first_free() recomputes it lazily.
	 */
	if (axmap->first_free >= bit_nr &&
	    axmap->first_free < bit_nr + data->nr_bits)
		axmap->first_free = -1ULL;

	/* Clamp the request to the logical size of the map */
	if (bit_nr > axmap->nr_bits)
		return;
	else if (bit_nr + nr_bits > axmap->nr_bits)
		nr_bits = axmap->nr_bits - bit_nr;

	set_bits = 0;
	while (nr_bits) {
		axmap_handler(axmap, bit_nr, axmap_set_fn, data);
		set_bits += data->set_bits;

		/*
		 * NOTE(review): the continue condition compares set_bits
		 * against (BLOCKS_PER_UNIT - nr_bits) rather than the word
		 * offset of bit_nr, which looks suspect. In practice
		 * axmap_set_nr() clamps nr_bits to a single word before
		 * calling here, so this loop rarely re-iterates — confirm.
		 */
		if (!data->set_bits ||
		    data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
			break;

		nr_bits -= data->set_bits;
		bit_nr += data->set_bits;

		data->nr_bits = nr_bits;
	}

	data->set_bits = set_bits;
}
289
290void axmap_set(struct axmap *axmap, uint64_t bit_nr)
291{
292 struct axmap_set_data data = { .nr_bits = 1, };
293
294 __axmap_set(axmap, bit_nr, &data);
295}
296
/*
 * Set 'nr_bits' contiguous bits starting at 'bit_nr', one word-sized
 * chunk at a time. Returns how many bits were actually newly set; stops
 * early as soon as a chunk hits bits that were already set.
 */
unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr,
			  unsigned int nr_bits)
{
	unsigned int set_bits = 0;

	do {
		struct axmap_set_data data = { .nr_bits = nr_bits, };
		unsigned int max_bits, this_set;

		/* Clamp this round to the word that contains bit_nr */
		max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
		if (max_bits < nr_bits)
			data.nr_bits = max_bits;

		this_set = data.nr_bits;
		__axmap_set(axmap, bit_nr, &data);
		set_bits += data.set_bits;

		/* Partial set means we ran into already-set bits: stop */
		if (data.set_bits != this_set)
			break;

		nr_bits -= data.set_bits;
		bit_nr += data.set_bits;
	} while (nr_bits);

	return set_bits;
}
322
323static bool axmap_isset_fn(struct axmap_level *al, unsigned long offset,
324 unsigned int bit, void *unused)
325{
326 return (al->map[offset] & (1UL << bit)) != 0;
327}
328
329bool axmap_isset(struct axmap *axmap, uint64_t bit_nr)
330{
331 if (bit_nr <= axmap->nr_bits)
332 return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
333
334 return false;
335}
336
/*
 * Find the first free bit, starting the search at word 'index' of the
 * given 'level' and narrowing down level by level until the bottom.
 * Each level's first clear bit becomes the word index to examine at the
 * level below it. Returns the free bit number, or -1ULL if the map is
 * full (or the index walked out of range).
 */
static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
				      uint64_t index)
{
	uint64_t ret = -1ULL;
	unsigned long j;
	int i;

	/*
	 * Walk from 'level' down towards level 0, converging on the first
	 * free bit of the full-resolution map.
	 */
	for (i = level; i >= 0; i--) {
		struct axmap_level *al = &axmap->levels[i];

		/*
		 * Clear 'ret', this is a bug condition.
		 */
		if (index >= al->map_size) {
			ret = -1ULL;
			break;
		}

		for (j = index; j < al->map_size; j++) {
			/* Fully-set word: no free bit here, scan on */
			if (al->map[j] == -1UL)
				continue;

			/*
			 * First free bit here is our index into the first
			 * free bit at the next higher level
			 */
			ret = index = (j << UNIT_SHIFT) + ffz(al->map[j]);
			break;
		}
	}

	/* Reject hits beyond the logical end of the map */
	if (ret < axmap->nr_bits)
		return ret;

	return (uint64_t) -1ULL;
}
376
377static uint64_t axmap_first_free(struct axmap *axmap)
378{
379 if (!firstfree_valid(axmap))
380 axmap->first_free = axmap_find_first_free(axmap, axmap->nr_levels - 1, 0);
381
382 return axmap->first_free;
383}
384
/*
 * State carried through axmap_next_free_fn() while walking the levels.
 */
struct axmap_next_free_data {
	unsigned int level;	/* out: level where a candidate word was found */
	unsigned long offset;	/* out: word offset within that level */
	uint64_t bit;		/* in: bit to search after, rescaled per level */
};
390
/*
 * Per-level worker for axmap_next_free(): look for a free bit above
 * data->bit in this level's word. Returns true (stop walking) once a
 * not-full word with such a bit is found, recording where in 'data'.
 */
static bool axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
			       unsigned int bit, void *__data)
{
	struct axmap_next_free_data *data = __data;
	/* Bits strictly above data->bit's position within this word */
	uint64_t mask = ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK];

	/* Nothing free above the target here; keep walking up */
	if (!(mask & ~al->map[offset]))
		return false;

	if (al->map[offset] != -1UL) {
		data->level = al->level;
		data->offset = offset;
		return true;
	}

	/*
	 * NOTE(review): this rounds up when rescaling the bit number to the
	 * next level's index; a plain 'data->bit >> UNIT_SHIFT' would be
	 * the floor. Confirm the round-up is intentional.
	 */
	data->bit = (data->bit + BLOCKS_PER_UNIT - 1) / BLOCKS_PER_UNIT;
	return false;
}
409
/*
 * 'bit_nr' is already set. Find the next free bit after this one.
 */
uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
{
	struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };
	uint64_t ret;

	/* Cheap path: the cached first free bit is already past bit_nr */
	if (firstfree_valid(axmap) && bit_nr < axmap->first_free)
		return axmap->first_free;

	/* Walk the levels; fall back to the global first free bit */
	if (!axmap_handler(axmap, bit_nr, axmap_next_free_fn, &data))
		return axmap_first_free(axmap);

	assert(data.level != -1U);

	/*
	 * In the rare case that the map is unaligned, we might end up
	 * finding an offset that's beyond the valid end. For that case,
	 * find the first free one, the map is practically full.
	 */
	ret = axmap_find_first_free(axmap, data.level, data.offset);
	if (ret != -1ULL)
		return ret;

	return axmap_first_free(axmap);
}