Fix segfault with client/server and minimal output
[fio.git] / lib / axmap.c
... / ...
CommitLineData
1/*
2 * Bitmap of bitmaps, where each layer is number-of-bits-per-word smaller than
3 * the previous. Hence an 'axmap', since we axe each previous layer into a
4 * much smaller piece. I swear, that is why it's named like that. It has
5 * nothing to do with anything remotely narcissistic.
6 *
7 * A set bit at layer N indicates a full word at layer N-1, and so forth. As
8 * the bitmap becomes progressively more full, checking for existence
9 * becomes cheaper (since fewer layers are walked, making it a lot more
10 * cache friendly) and locating the next free space likewise.
11 *
12 * Axmaps get pretty close to optimal (1 bit per block) space usage, since
13 * layers quickly diminish in size. Doing the size math is straight forward,
14 * since we have log64(blocks) layers of maps. For 20000 blocks, overhead
15 * is roughly 1.9%, or 1.019 bits per block. The number quickly converges
16 * towards 1.0158, or 1.58% of overhead.
17 */
18#include <stdio.h>
19#include <stdlib.h>
20#include <string.h>
21#include <assert.h>
22
23#include "../arch/arch.h"
24#include "axmap.h"
25#include "../minmax.h"
26
/*
 * Each map word holds BITS_PER_LONG bits; UNIT_SHIFT is log2 of that, so
 * 'index >> UNIT_SHIFT' selects the word and 'index & BLOCKS_PER_UNIT_MASK'
 * the bit within it.
 */
#if BITS_PER_LONG == 64
#define UNIT_SHIFT 6
#elif BITS_PER_LONG == 32
#define UNIT_SHIFT 5
#else
#error "Number of arch bits unknown"
#endif

#define BLOCKS_PER_UNIT (1U << UNIT_SHIFT)
#define BLOCKS_PER_UNIT_MASK (BLOCKS_PER_UNIT - 1)

/* the first-free cache is valid unless it holds the -1 sentinel */
#define firstfree_valid(b) ((b)->first_free != (uint64_t) -1)
/*
 * One layer of the bitmap. Level 0 holds the real bits; each level above
 * it has one summary bit per word of the level below (set == word full).
 */
struct axmap_level {
	int level;		/* depth of this layer, 0 == bottom */
	unsigned long map_size;	/* number of words in ->map */
	unsigned long *map;	/* the bits themselves */
};

struct axmap {
	unsigned int nr_levels;	/* number of entries in ->levels */
	struct axmap_level *levels;
	uint64_t first_free;	/* cached lowest clear bit, -1 when unknown */
	uint64_t nr_bits;	/* total bits tracked at level 0 */
};
52
53static unsigned long ulog64(unsigned long val, unsigned int log)
54{
55 while (log-- && val)
56 val >>= UNIT_SHIFT;
57
58 return val;
59}
60
61void axmap_reset(struct axmap *axmap)
62{
63 int i;
64
65 for (i = 0; i < axmap->nr_levels; i++) {
66 struct axmap_level *al = &axmap->levels[i];
67
68 memset(al->map, 0, al->map_size * sizeof(unsigned long));
69 }
70
71 axmap->first_free = 0;
72}
73
74void axmap_free(struct axmap *axmap)
75{
76 unsigned int i;
77
78 if (!axmap)
79 return;
80
81 for (i = 0; i < axmap->nr_levels; i++)
82 free(axmap->levels[i].map);
83
84 free(axmap->levels);
85 free(axmap);
86}
87
88struct axmap *axmap_new(unsigned long nr_bits)
89{
90 struct axmap *axmap;
91 unsigned int i, levels;
92
93 axmap = malloc(sizeof(*axmap));
94 if (!axmap)
95 return NULL;
96
97 levels = 1;
98 i = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
99 while (i > 1) {
100 i = (i + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
101 levels++;
102 }
103
104 axmap->nr_levels = levels;
105 axmap->levels = malloc(axmap->nr_levels * sizeof(struct axmap_level));
106 axmap->nr_bits = nr_bits;
107
108 for (i = 0; i < axmap->nr_levels; i++) {
109 struct axmap_level *al = &axmap->levels[i];
110
111 al->level = i;
112 al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
113 al->map = malloc(al->map_size * sizeof(unsigned long));
114 if (!al->map)
115 goto err;
116
117 nr_bits = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT;
118 }
119
120 axmap_reset(axmap);
121 return axmap;
122err:
123 for (i = 0; i < axmap->nr_levels; i++)
124 if (axmap->levels[i].map)
125 free(axmap->levels[i].map);
126
127 free(axmap->levels);
128 return NULL;
129}
130
/*
 * Walk the map bottom-up for 'bit_nr', calling 'func' at each level with
 * the word offset and in-word bit index of the bit's position at that
 * level. Returns 1 as soon as 'func' returns non-zero (walk aborted), 0
 * if every level was visited.
 */
static int axmap_handler(struct axmap *axmap, uint64_t bit_nr,
			 int (*func)(struct axmap_level *, unsigned long, unsigned int,
			 void *), void *data)
{
	struct axmap_level *al;
	int i;

	for (i = 0; i < axmap->nr_levels; i++) {
		/* at level i, one bit covers BLOCKS_PER_UNIT^i level-0 bits */
		unsigned long index = ulog64(bit_nr, i);
		unsigned long offset = index >> UNIT_SHIFT;
		unsigned int bit = index & BLOCKS_PER_UNIT_MASK;

		al = &axmap->levels[i];

		if (func(al, offset, bit, data))
			return 1;
	}

	return 0;
}
151
152static int axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr,
153 int (*func)(struct axmap_level *, unsigned long, unsigned int, void *),
154 void *data)
155{
156 struct axmap_level *al;
157 int i, level = axmap->nr_levels;
158
159 for (i = axmap->nr_levels - 1; i >= 0; i--) {
160 unsigned long index = ulog64(bit_nr, --level);
161 unsigned long offset = index >> UNIT_SHIFT;
162 unsigned int bit = index & BLOCKS_PER_UNIT_MASK;
163
164 al = &axmap->levels[i];
165
166 if (func(al, offset, bit, data))
167 return 1;
168 }
169
170 return 0;
171}
172
173static int axmap_clear_fn(struct axmap_level *al, unsigned long offset,
174 unsigned int bit, void *unused)
175{
176 if (!(al->map[offset] & (1UL << bit)))
177 return 1;
178
179 al->map[offset] &= ~(1UL << bit);
180 return 0;
181}
182
183void axmap_clear(struct axmap *axmap, uint64_t bit_nr)
184{
185 axmap_handler(axmap, bit_nr, axmap_clear_fn, NULL);
186}
187
/*
 * Argument/result bundle for the set handlers: 'nr_bits' is how many bits
 * the caller wants set, 'set_bits' reports how many were actually newly set.
 */
struct axmap_set_data {
	unsigned int nr_bits;
	unsigned int set_bits;
};
192
/*
 * bit_masks[i] has the low 'i' bits set, for i = 0..BITS_PER_LONG.
 */
static unsigned long bit_masks[] = {
	0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
	0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
	0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
	0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
	0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
	0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
	0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
	0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
	0x00000000ffffffff,
#if BITS_PER_LONG == 64
	0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
	0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
	0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
	0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
	0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
	0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
	0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
	0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
#endif
};
214
/*
 * Per-level set handler: set up to data->nr_bits bits in this word,
 * starting at 'bit'. On the bottom level, data->set_bits records how many
 * bits were newly set. Returns 0 (continue walking up) only when the word
 * is now completely full, so the "full" summary bit above gets set too.
 */
static int axmap_set_fn(struct axmap_level *al, unsigned long offset,
			unsigned int bit, void *__data)
{
	struct axmap_set_data *data = __data;
	unsigned long mask, overlap;
	unsigned int nr_bits;

	/* clamp the run so it doesn't spill past the end of this word */
	nr_bits = min(data->nr_bits, BLOCKS_PER_UNIT - bit);

	mask = bit_masks[nr_bits] << bit;

	/*
	 * Mask off any potential overlap, only sets contig regions
	 */
	overlap = al->map[offset] & mask;
	if (overlap == mask)
		return 1;

	/*
	 * Strip every already-set bit out of 'mask', one at a time.
	 * NOTE(review): this trims individual bits, so a hole in the
	 * middle shrinks the run rather than stopping at the first
	 * overlap — presumably only the count matters to __axmap_set;
	 * confirm.
	 */
	while (overlap) {
		unsigned long clear_mask = ~(1UL << ffz(~overlap));

		mask &= clear_mask;
		overlap &= clear_mask;
		nr_bits--;
	}

	assert(mask);
	assert(!(al->map[offset] & mask));

	al->map[offset] |= mask;

	/* only the bottom level reports real bits set */
	if (!al->level)
		data->set_bits = nr_bits;

	/* upper levels set a single summary bit per full word below */
	data->nr_bits = 1;
	return al->map[offset] != -1UL;
}
252
/*
 * Set data->nr_bits bits starting at 'bit_nr', clamped to the end of the
 * map. On return, data->set_bits holds the number of bits newly set.
 */
static void __axmap_set(struct axmap *axmap, uint64_t bit_nr,
			struct axmap_set_data *data)
{
	unsigned int set_bits, nr_bits = data->nr_bits;

	/* invalidate the first-free cache if this range covers it */
	if (axmap->first_free >= bit_nr &&
	    axmap->first_free < bit_nr + data->nr_bits)
		axmap->first_free = -1ULL;

	/*
	 * NOTE(review): '>' looks like it should be '>=', but the
	 * bit_nr == nr_bits case is harmless since the clamp below then
	 * reduces nr_bits to 0 and the loop never runs.
	 */
	if (bit_nr > axmap->nr_bits)
		return;
	else if (bit_nr + nr_bits > axmap->nr_bits)
		nr_bits = axmap->nr_bits - bit_nr;

	set_bits = 0;
	while (nr_bits) {
		axmap_handler(axmap, bit_nr, axmap_set_fn, data);
		set_bits += data->set_bits;

		/*
		 * NOTE(review): the continuation test compares set_bits
		 * against BLOCKS_PER_UNIT - nr_bits; it reads oddly as a
		 * word-boundary check — confirm intent before touching.
		 */
		if (!data->set_bits ||
		    data->set_bits != (BLOCKS_PER_UNIT - nr_bits))
			break;

		nr_bits -= data->set_bits;
		bit_nr += data->set_bits;

		data->nr_bits = nr_bits;
	}

	data->set_bits = set_bits;
}
284
285void axmap_set(struct axmap *axmap, uint64_t bit_nr)
286{
287 struct axmap_set_data data = { .nr_bits = 1, };
288
289 __axmap_set(axmap, bit_nr, &data);
290}
291
292unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr, unsigned int nr_bits)
293{
294 unsigned int set_bits = 0;
295
296 do {
297 struct axmap_set_data data = { .nr_bits = nr_bits, };
298 unsigned int max_bits, this_set;
299
300 max_bits = BLOCKS_PER_UNIT - (bit_nr & BLOCKS_PER_UNIT_MASK);
301 if (max_bits < nr_bits)
302 data.nr_bits = max_bits;
303
304 this_set = data.nr_bits;
305 __axmap_set(axmap, bit_nr, &data);
306 set_bits += data.set_bits;
307 if (data.set_bits != this_set)
308 break;
309
310 nr_bits -= data.set_bits;
311 bit_nr += data.set_bits;
312 } while (nr_bits);
313
314 return set_bits;
315}
316
317static int axmap_isset_fn(struct axmap_level *al, unsigned long offset,
318 unsigned int bit, void *unused)
319{
320 return (al->map[offset] & (1UL << bit)) != 0;
321}
322
323int axmap_isset(struct axmap *axmap, uint64_t bit_nr)
324{
325 if (bit_nr <= axmap->nr_bits)
326 return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL);
327
328 return 0;
329}
330
/*
 * Find the lowest clear bit, starting the scan at 'level' from word
 * 'index' and converging down to level 0. Returns the level-0 bit number,
 * or -1ULL if no free bit exists in range.
 */
static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
				      uint64_t index)
{
	uint64_t ret = -1ULL;
	unsigned long j;
	int i;

	/*
	 * Walk from the given (highest) level down towards level 0; each
	 * level's first free bit narrows where to look in the next one.
	 */
	for (i = level; i >= 0; i--) {
		struct axmap_level *al = &axmap->levels[i];

		/*
		 * Clear 'ret', this is a bug condition.
		 */
		if (index >= al->map_size) {
			ret = -1ULL;
			break;
		}

		for (j = index; j < al->map_size; j++) {
			/* fully set words carry no free space, skip them */
			if (al->map[j] == -1UL)
				continue;

			/*
			 * First free bit here is our index into the first
			 * free bit at the next higher level
			 */
			ret = index = (j << UNIT_SHIFT) + ffz(al->map[j]);
			break;
		}
	}

	/* unaligned maps can yield an index past the valid end; reject it */
	if (ret < axmap->nr_bits)
		return ret;

	return (uint64_t) -1ULL;
}
370
371static uint64_t axmap_first_free(struct axmap *axmap)
372{
373 if (firstfree_valid(axmap))
374 return axmap->first_free;
375
376 axmap->first_free = axmap_find_first_free(axmap, axmap->nr_levels - 1, 0);
377 return axmap->first_free;
378}
379
/*
 * Result of the next-free walk: the level and word offset where free
 * space was found, plus the bit the search started from.
 */
struct axmap_next_free_data {
	unsigned int level;
	unsigned long offset;
	uint64_t bit;
};
385
/*
 * Per-level helper for axmap_next_free(): look for a clear bit strictly
 * after data->bit within this word. Returns 1 (stopping the walk) when
 * this level's word has usable free space, recording level and offset in
 * 'data'; returns 0 to continue at the next level up.
 */
static int axmap_next_free_fn(struct axmap_level *al, unsigned long offset,
			      unsigned int bit, void *__data)
{
	struct axmap_next_free_data *data = __data;
	/* mask covering the bits above data->bit within one word */
	uint64_t mask = ~bit_masks[(data->bit + 1) & BLOCKS_PER_UNIT_MASK];

	/* no free bit above data->bit in this word: try the level above */
	if (!(mask & ~al->map[offset]))
		return 0;

	if (al->map[offset] != -1UL) {
		data->level = al->level;
		data->offset = offset;
		return 1;
	}

	/*
	 * NOTE(review): presumably a round-up scale of the bit index to
	 * the next level's granularity — confirm, a shift by UNIT_SHIFT
	 * would be the usual form here.
	 */
	data->bit = (data->bit + BLOCKS_PER_UNIT - 1) / BLOCKS_PER_UNIT;
	return 0;
}
404
/*
 * 'bit_nr' is already set. Find the next free bit after this one.
 */
uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr)
{
	struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, };
	uint64_t ret;

	/* cheap path: the cached first free bit already lies past bit_nr */
	if (firstfree_valid(axmap) && bit_nr < axmap->first_free)
		return axmap->first_free;

	/* no free space found after bit_nr; fall back to the lowest free */
	if (!axmap_handler(axmap, bit_nr, axmap_next_free_fn, &data))
		return axmap_first_free(axmap);

	assert(data.level != -1U);

	/*
	 * In the rare case that the map is unaligned, we might end up
	 * finding an offset that's beyond the valid end. For that case,
	 * find the first free one, the map is practically full.
	 */
	ret = axmap_find_first_free(axmap, data.level, data.offset);
	if (ret != -1ULL)
		return ret;

	return axmap_first_free(axmap);
}