/*
 * Bitmap of bitmaps, where each layer is number-of-bits-per-word smaller than
 * the previous. Hence an 'axmap', since we axe each previous layer into a
 * much smaller piece. I swear, that is why it's named like that. It has
 * nothing to do with anything remotely narcissistic.
 *
 * A set bit at layer N indicates a full word at layer N-1, and so forth. As
 * the bitmap becomes progressively more full, checking for existence
 * becomes cheaper (since fewer layers are walked, making it a lot more
 * cache friendly) and locating the next free space likewise.
 *
 * Axmaps get pretty close to optimal (1 bit per block) space usage, since
 * layers quickly diminish in size. Doing the size math is straightforward,
 * since we have log64(blocks) layers of maps. For 20000 blocks, overhead
 * is roughly 1.9%, or 1.019 bits per block. The number quickly converges
 * towards 1.0158, or 1.58% of overhead.
 */
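/*
 * As a concrete example of the sizing done in axmap_new() below: on a
 * 64-bit build, tracking 20000 blocks takes three levels. Level 0 needs
 * (20000 + 63) / 64 = 313 words, level 1 needs (313 + 63) / 64 = 5 words,
 * and level 2 needs a single word summarizing the whole map.
 */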
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "../arch/arch.h"
#include "axmap.h"
#include "../minmax.h"

#if BITS_PER_LONG == 64
#define UNIT_SHIFT 6
#elif BITS_PER_LONG == 32
#define UNIT_SHIFT 5
#else
#error "Number of arch bits unknown"
#endif

#define BLOCKS_PER_UNIT (1U << UNIT_SHIFT)
#define BLOCKS_PER_UNIT_MASK (BLOCKS_PER_UNIT - 1)

#define firstfree_valid(b) ((b)->first_free != (uint64_t) -1)

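/*
 * One layer of the map. 'level' is the layer index (0 being the full-size
 * layer), 'map_size' is the number of words in 'map'.
 */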
struct axmap_level {
	int level;
	unsigned long map_size;
	unsigned long *map;
};

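/*
 * 'levels' holds 'nr_levels' progressively smaller layers, 'nr_bits' is
 * the number of valid bits in layer 0, and 'first_free' caches the lowest
 * known free bit (-1ULL when the cache is invalid).
 */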
struct axmap {
	unsigned int nr_levels;
	struct axmap_level *levels;
	uint64_t first_free;
	uint64_t nr_bits;
};

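/*
 * Divide 'val' by BLOCKS_PER_UNIT 'log' times, i.e. map a bit number at
 * layer 0 to the corresponding bit number at layer 'log'.
 */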
static unsigned long ulog64(unsigned long val, unsigned int log)
{
	while (log-- && val)
		val >>= UNIT_SHIFT;

	return val;
}

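/*
 * Zero all layers and mark bit 0 as the first free bit.
 */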
void axmap_reset(struct axmap *axmap)
{
	int i;

	for (i = 0; i < axmap->nr_levels; i++) {
		struct axmap_level *al = &axmap->levels[i];

		memset(al->map, 0, al->map_size * sizeof(unsigned long));
	}

	axmap->first_free = 0;
}

void axmap_free(struct axmap *axmap)
{
	unsigned int i;

	if (!axmap)
		return;

	for (i = 0; i < axmap->nr_levels; i++)
		free(axmap->levels[i].map);

	free(axmap->levels);
	free(axmap);
}

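/*
 * Allocate an axmap capable of tracking 'nr_bits' bits. The number of
 * layers equals the number of times nr_bits can be divided by
 * BLOCKS_PER_UNIT (rounding up) before only a single word is left.
 */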
struct axmap *axmap_new(unsigned long nr_bits)
{
	struct axmap *axmap;
	unsigned int i, levels;

	axmap = malloc(sizeof(*axmap));
	if (!axmap)
		return NULL;

	levels = 1;
	i = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
	while (i > 1) {
		i = (i + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
		levels++;
	}

	axmap->nr_levels = levels;
	axmap->levels = calloc(axmap->nr_levels, sizeof(struct axmap_level));
	if (!axmap->levels) {
		free(axmap);
		return NULL;
	}
	axmap->nr_bits = nr_bits;

	for (i = 0; i < axmap->nr_levels; i++) {
		struct axmap_level *al = &axmap->levels[i];

		al->level = i;
		al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
		al->map = malloc(al->map_size * sizeof(unsigned long));
		if (!al->map)
			goto err;

		nr_bits = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
	}

	axmap_reset(axmap);
	return axmap;
err:
	for (i = 0; i < axmap->nr_levels; i++)
		if (axmap->levels[i].map)
			free(axmap->levels[i].map);

	free(axmap->levels);
	free(axmap);
	return NULL;
}

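/*
 * Apply 'func' to the word/bit pair covering 'bit_nr' at each layer,
 * walking from layer 0 upwards. The walk stops early if 'func' returns
 * true, and the return value says whether that happened.
 */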
static bool axmap_handler(struct axmap *axmap, uint64_t bit_nr,
			  bool (*func)(struct axmap_level *, unsigned long, unsigned int,
			  void *), void *data)
{
	struct axmap_level *al;
	int i;

	for (i = 0; i < axmap->nr_levels; i++) {
		unsigned long index = ulog64(bit_nr, i);
		unsigned long offset = index >> UNIT_SHIFT;
		unsigned int bit = index & BLOCKS_PER_UNIT_MASK;

		al = &axmap->levels[i];

		if (func(al, offset, bit, data))
			return true;
	}

	return false;
}

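/*
 * Like axmap_handler(), but walk the layers from the topmost (smallest)
 * layer down to layer 0.
 */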
static bool axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
	bool (*func)(struct axmap_level *, unsigned long, unsigned int, void *),
	void *data)
{
	struct axmap_level *al;
	int i, level = axmap->nr_levels;

	for (i = axmap->nr_levels - 1; i >= 0; i--) {
		unsigned long index = ulog64(bit_nr, --level);
		unsigned long offset = index >> UNIT_SHIFT;
		unsigned int bit = index & BLOCKS_PER_UNIT_MASK;

		al = &axmap->levels[i];

		if (func(al, offset, bit, data))
			return true;
	}

	return false;
}

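/*
 * Clear the bit covering 'bit_nr' at each layer. The walk stops at the
 * first layer where the bit was already clear, since none of the layers
 * above it can be marked full in that case.
 */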
static bool axmap_clear_fn(struct axmap_level *al, unsigned long offset,
			   unsigned int bit, void *unused)
{
	if (!(al->map[offset] & (1UL << bit)))
		return true;

	al->map[offset] &= ~(1UL << bit);
	return false;
}

void axmap_clear(struct axmap *axmap, uint64_t bit_nr)
{
	axmap_handler(axmap, bit_nr, axmap_clear_fn, NULL);

	if (bit_nr < axmap->first_free)
		axmap->first_free = bit_nr;
}

struct axmap_set_data {
	unsigned int nr_bits;
	unsigned int set_bits;
};

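/*
 * bit_masks[n] has the low n bits set, so bit_masks[n] << bit selects an
 * n bit wide region starting at 'bit' within a word.
 */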
static const unsigned long bit_masks[] = {
	0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
	0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
	0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
	0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
	0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
	0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
	0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
	0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
	0x00000000ffffffff,
#if BITS_PER_LONG == 64
	0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
	0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
	0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
	0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
	0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
	0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
	0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
	0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
#endif
};

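/*
 * Set up to data->nr_bits bits in this word, starting at 'bit'. Bits in
 * the range that are already set are masked out. At layer 0,
 * data->set_bits records how many bits were newly set; layers above 0
 * only ever set a single summary bit. The walk only continues to the
 * next layer if this word is now completely full.
 */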
static bool axmap_set_fn(struct axmap_level *al, unsigned long offset,
			 unsigned int bit, void *__data)
{
	struct axmap_set_data *data = __data;
	unsigned long mask, overlap;
	unsigned int nr_bits;

	nr_bits = min(data->nr_bits, BLOCKS_PER_UNIT - bit);

	mask = bit_masks[nr_bits] << bit;

	/*
	 * Mask off any potential overlap, only sets contig regions
	 */
	overlap = al->map[offset] & mask;
	if (overlap == mask)
		return true;

	while (overlap) {
		unsigned long clear_mask = ~(1UL << ffz(~overlap));

		mask &= clear_mask;
		overlap &= clear_mask;
		nr_bits--;
	}

	assert(mask);
	assert(!(al->map[offset] & mask));

	al->map[offset] |= mask;

	if (!al->level)
		data->set_bits = nr_bits;

	data->nr_bits = 1;
	return al->map[offset] != -1UL;
}

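/*
 * Set data->nr_bits bits starting at 'bit_nr'. Invalidates the cached
 * first_free if the range covers it, clamps the range to the end of the
 * map, and returns the number of newly set bits in data->set_bits.
 */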
static void __axmap_set(struct axmap *axmap, uint64_t bit_nr,
			 struct axmap_set_data *data)
{
	unsigned int set_bits, nr_bits = data->nr_bits;

	if (axmap->first_free >= bit_nr &&
	    axmap->first_free < bit_nr + data->nr_bits)
		axmap->first_free = -1ULL;

	if (bit_nr > axmap->nr_bits)
		return;
	else if (bit_nr + nr_bits > axmap->nr_bits)
		nr_bits = axmap->nr_bits - bit_nr;

	set_bits = 0;
	while (nr_bits) {
		axmap_handler(axmap, bit_nr, axmap_set_fn, data);
		set_bits += data->set_bits;

		if (!data->set_bits ||
		    data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
			break;

		nr_bits -= data->set_bits;
		bit_nr += data->set_bits;

		data->nr_bits = nr_bits;
	}

	data->set_bits = set_bits;
}

void axmap_set(struct axmap *axmap, uint64_t bit_nr)
{
	struct axmap_set_data data = { .nr_bits = 1, };

	__axmap_set(axmap, bit_nr, &data);
}

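/*
 * Set 'nr_bits' bits starting at 'bit_nr', working in chunks that never
 * cross a word boundary at layer 0. Returns the number of bits actually
 * set, which can be less than nr_bits if part of the range was already
 * set or extends past the end of the map.
 */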
unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr,
			  unsigned int nr_bits)
{
	unsigned int set_bits = 0;

	do {
		struct axmap_set_data data = { .nr_bits = nr_bits, };
		unsigned int max_bits, this_set;

		max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
		if (max_bits < nr_bits)
			data.nr_bits = max_bits;

		this_set = data.nr_bits;
		__axmap_set(axmap, bit_nr, &data);
		set_bits += data.set_bits;
		if (data.set_bits != this_set)
			break;

		nr_bits -= data.set_bits;
		bit_nr += data.set_bits;
	} while (nr_bits);

	return set_bits;
}

static bool axmap_isset_fn(struct axmap_level *al, unsigned long offset,
			   unsigned int bit, void *unused)
{
	return (al->map[offset] & (1UL << bit)) != 0;
}

bool axmap_isset(struct axmap *axmap, uint64_t bit_nr)
{
	if (bit_nr <= axmap->nr_bits)
		return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);

	return false;
}

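/*
 * Find the first free bit, starting the search at word 'index' of layer
 * 'level' and using the result at each layer to narrow the search in the
 * next, larger layer. Returns -1ULL if no free bit is found within the
 * valid range.
 */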
static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
				      uint64_t index)
{
	uint64_t ret = -1ULL;
	unsigned long j;
	int i;

	/*
	 * Start at the bottom, then converge towards first free bit at the top
	 */
	for (i = level; i >= 0; i--) {
		struct axmap_level *al = &axmap->levels[i];

		/*
		 * Clear 'ret', this is a bug condition.
		 */
		if (index >= al->map_size) {
			ret = -1ULL;
			break;
		}

		for (j = index; j < al->map_size; j++) {
			if (al->map[j] == -1UL)
				continue;

			/*
			 * First free bit here is our index into the first
			 * free bit at the next higher level
			 */
			ret = index = (j << UNIT_SHIFT) + ffz(al->map[j]);
			break;
		}
	}

	if (ret < axmap->nr_bits)
		return ret;

	return (uint64_t) -1ULL;
}

static uint64_t axmap_first_free(struct axmap *axmap)
{
	if (!firstfree_valid(axmap))
		axmap->first_free = axmap_find_first_free(axmap, axmap->nr_levels - 1, 0);

	return axmap->first_free;
}

struct axmap_next_free_data {
	unsigned int level;
	unsigned long offset;
	uint64_t bit;
};

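/*
 * Helper for axmap_next_free(): if this word has a free bit after
 * data->bit, remember which level and word offset it was found in and
 * stop the walk; otherwise continue to the next layer.
 */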
static bool axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
			       unsigned int bit, void *__data)
{
	struct axmap_next_free_data *data = __data;
	uint64_t mask = ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK];

	if (!(mask & ~al->map[offset]))
		return false;

	if (al->map[offset] != -1UL) {
		data->level = al->level;
		data->offset = offset;
		return true;
	}

	data->bit = (data->bit + BLOCKS_PER_UNIT - 1) / BLOCKS_PER_UNIT;
	return false;
}

/*
 * 'bit_nr' is already set. Find the next free bit after this one.
 */
uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
{
	struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };
	uint64_t ret;

	if (firstfree_valid(axmap) && bit_nr < axmap->first_free)
		return axmap->first_free;

	if (!axmap_handler(axmap, bit_nr, axmap_next_free_fn, &data))
		return axmap_first_free(axmap);

	assert(data.level != -1U);

	/*
	 * In the rare case that the map is unaligned, we might end up
	 * finding an offset that's beyond the valid end. For that case,
	 * find the first free one, the map is practically full.
	 */
	ret = axmap_find_first_free(axmap, data.level, data.offset);
	if (ret != -1ULL)
		return ret;

	return axmap_first_free(axmap);
}