Commit | Line | Data |
---|---|---|
7ebd796f JA |
1 | /* |
2 | * Bitmap of bitmaps, where each layer is number-of-bits-per-word smaller than | |
3 | * the previous. Hence an 'axmap', since we axe each previous layer into a | |
4 | * much smaller piece. I swear, that is why it's named like that. It has | |
5 | * nothing to do with anything remotely narcissistic. | |
6 | * | |
7 | * A set bit at layer N indicates a full word at layer N-1, and so forth. As | |
8 | * the bitmap becomes progressively more full, checking for existence | |
9 | * becomes cheaper (since fewer layers are walked, making it a lot more | |
10 | * cache friendly) and locating the next free space likewise. | |
11 | * | |
12 | * Axmaps get pretty close to optimal (1 bit per block) space usage, since | |
13 | * layers quickly diminish in size. Doing the size math is straightforward, | |
14 | * since we have log64(blocks) layers of maps. For 20000 blocks, overhead | |
15 | * is roughly 1.9%, or 1.019 bits per block. The number quickly converges | |
16 | * towards 1.0158, or 1.58% of overhead. | |
17 | */ | |
18 | #include <stdio.h> | |
19 | #include <stdlib.h> | |
20 | #include <string.h> | |
21 | #include <assert.h> | |
22 | ||
23 | #include "../arch/arch.h" | |
24 | #include "axmap.h" | |
25 | #include "../smalloc.h" | |
26 | #include "../minmax.h" | |
27 | ||
28 | #if BITS_PER_LONG == 64 | |
29 | #define UNIT_SHIFT 6 | |
30 | #elif BITS_PER_LONG == 32 | |
31 | #define UNIT_SHIFT 5 | |
32 | #else | |
33 | #error "Number of arch bits unknown" | |
34 | #endif | |
35 | ||
36 | #define BLOCKS_PER_UNIT (1UL << UNIT_SHIFT) | |
37 | #define BLOCKS_PER_UNIT_MASK (BLOCKS_PER_UNIT - 1) | |
38 | ||
39 | #define firstfree_valid(b) ((b)->first_free != (uint64_t) -1) | |
40 | ||
/*
 * One layer of the map. Level 0 is the full-detail bitmap; each higher
 * level has one bit per word of the level below it.
 */
struct axmap_level {
	int level;		/* depth of this level; 0 == per-block bitmap */
	unsigned long map_size;	/* number of words in ->map */
	unsigned long *map;	/* bitmap words; at level N a set bit marks a
				 * completely full word at level N-1 */
};
46 | ||
/*
 * The map proper: a stack of progressively smaller bitmap levels plus a
 * cached hint of the lowest free bit.
 */
struct axmap {
	unsigned int nr_levels;		/* number of stacked levels */
	struct axmap_level *levels;	/* levels[0] is the real bitmap */
	uint64_t first_free;		/* cached lowest free bit; (uint64_t)-1
					 * means unknown (see firstfree_valid) */
};
52 | ||
53 | static unsigned long ulog64(unsigned long val, unsigned int log) | |
54 | { | |
55 | while (log-- && val) | |
56 | val >>= UNIT_SHIFT; | |
57 | ||
58 | return val; | |
59 | } | |
60 | ||
61 | void axmap_reset(struct axmap *axmap) | |
62 | { | |
63 | int i; | |
64 | ||
65 | for (i = 0; i < axmap->nr_levels; i++) { | |
66 | struct axmap_level *al = &axmap->levels[i]; | |
67 | ||
68 | memset(al->map, 0, al->map_size * sizeof(unsigned long)); | |
69 | } | |
70 | } | |
71 | ||
72 | void axmap_free(struct axmap *axmap) | |
73 | { | |
74 | unsigned int i; | |
75 | ||
76 | if (!axmap) | |
77 | return; | |
78 | ||
79 | for (i = 0; i < axmap->nr_levels; i++) | |
80 | sfree(axmap->levels[i].map); | |
81 | ||
82 | sfree(axmap->levels); | |
83 | sfree(axmap); | |
84 | } | |
85 | ||
86 | struct axmap *axmap_new(unsigned long nr_bits) | |
87 | { | |
88 | struct axmap *axmap; | |
89 | unsigned int i, levels; | |
90 | ||
91 | axmap = smalloc(sizeof(*axmap)); | |
92 | if (!axmap) | |
93 | return NULL; | |
94 | ||
95 | levels = 1; | |
96 | i = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT; | |
97 | while (i > 1) { | |
98 | i = (i + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT; | |
99 | levels++; | |
100 | } | |
101 | ||
102 | axmap->nr_levels = levels; | |
103 | axmap->levels = smalloc(axmap->nr_levels * sizeof(struct axmap_level)); | |
104 | axmap->first_free = 0; | |
105 | ||
106 | for (i = 0; i < axmap->nr_levels; i++) { | |
107 | struct axmap_level *al = &axmap->levels[i]; | |
108 | ||
109 | al->level = i; | |
110 | al->map_size = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT; | |
111 | al->map = smalloc(al->map_size * sizeof(unsigned long)); | |
112 | if (!al->map) | |
113 | goto err; | |
114 | ||
115 | nr_bits = (nr_bits + BLOCKS_PER_UNIT - 1) >> UNIT_SHIFT; | |
116 | } | |
117 | ||
118 | axmap_reset(axmap); | |
119 | return axmap; | |
120 | err: | |
121 | for (i = 0; i < axmap->nr_levels; i++) | |
122 | if (axmap->levels[i].map) | |
123 | sfree(axmap->levels[i].map); | |
124 | ||
125 | sfree(axmap->levels); | |
126 | return NULL; | |
127 | } | |
128 | ||
129 | static int axmap_handler(struct axmap *axmap, uint64_t bit_nr, | |
130 | int (*func)(struct axmap_level *, unsigned long, unsigned int, | |
131 | void *), void *data) | |
132 | { | |
133 | struct axmap_level *al; | |
134 | int i; | |
135 | ||
136 | for (i = 0; i < axmap->nr_levels; i++) { | |
137 | unsigned long index = ulog64(bit_nr, i); | |
138 | unsigned long offset = index >> UNIT_SHIFT; | |
139 | unsigned int bit = index & BLOCKS_PER_UNIT_MASK; | |
140 | ||
141 | al = &axmap->levels[i]; | |
142 | ||
143 | if (func(al, offset, bit, data)) | |
144 | return 1; | |
145 | } | |
146 | ||
147 | return 0; | |
148 | } | |
149 | ||
150 | static int axmap_handler_topdown(struct axmap *axmap, uint64_t bit_nr, | |
151 | int (*func)(struct axmap_level *, unsigned long, unsigned int, void *), | |
152 | void *data) | |
153 | { | |
154 | struct axmap_level *al; | |
155 | int i, level = axmap->nr_levels; | |
156 | ||
157 | for (i = axmap->nr_levels - 1; i >= 0; i--) { | |
158 | unsigned long index = ulog64(bit_nr, --level); | |
159 | unsigned long offset = index >> UNIT_SHIFT; | |
160 | unsigned int bit = index & BLOCKS_PER_UNIT_MASK; | |
161 | ||
162 | al = &axmap->levels[i]; | |
163 | ||
164 | if (func(al, offset, bit, data)) | |
165 | return 1; | |
166 | } | |
167 | ||
168 | return 0; | |
169 | } | |
170 | ||
171 | static int axmap_clear_fn(struct axmap_level *al, unsigned long offset, | |
172 | unsigned int bit, void *unused) | |
173 | { | |
174 | if (!(al->map[offset] & (1UL << bit))) | |
175 | return 1; | |
176 | ||
177 | al->map[offset] &= ~(1UL << bit); | |
178 | return 0; | |
179 | } | |
180 | ||
181 | void axmap_clear(struct axmap *axmap, uint64_t bit_nr) | |
182 | { | |
183 | axmap_handler(axmap, bit_nr, axmap_clear_fn, NULL); | |
184 | } | |
185 | ||
/*
 * In/out parameter block for the set path (axmap_set_fn / __axmap_set).
 */
struct axmap_set_data {
	unsigned int nr_bits;	/* in: bits requested; forced to 1 for levels
				 * above 0, which track one summary bit */
	unsigned int set_bits;	/* out: bits actually set at level 0 */
	unsigned int fail_ok;	/* in: overlap with already-set bits allowed */
};
191 | ||
/*
 * bit_masks[n] has the lowest n bits set, for n = 0..BITS_PER_LONG
 * inclusive. Indexing with n avoids computing (1UL << n) - 1, which is
 * undefined for n == BITS_PER_LONG.
 */
static unsigned long bit_masks[] = {
	0x0000000000000000, 0x0000000000000001, 0x0000000000000003, 0x0000000000000007,
	0x000000000000000f, 0x000000000000001f, 0x000000000000003f, 0x000000000000007f,
	0x00000000000000ff, 0x00000000000001ff, 0x00000000000003ff, 0x00000000000007ff,
	0x0000000000000fff, 0x0000000000001fff, 0x0000000000003fff, 0x0000000000007fff,
	0x000000000000ffff, 0x000000000001ffff, 0x000000000003ffff, 0x000000000007ffff,
	0x00000000000fffff, 0x00000000001fffff, 0x00000000003fffff, 0x00000000007fffff,
	0x0000000000ffffff, 0x0000000001ffffff, 0x0000000003ffffff, 0x0000000007ffffff,
	0x000000000fffffff, 0x000000001fffffff, 0x000000003fffffff, 0x000000007fffffff,
	0x00000000ffffffff,
#if BITS_PER_LONG == 64
	0x00000001ffffffff, 0x00000003ffffffff, 0x00000007ffffffff, 0x0000000fffffffff,
	0x0000001fffffffff, 0x0000003fffffffff, 0x0000007fffffffff, 0x000000ffffffffff,
	0x000001ffffffffff, 0x000003ffffffffff, 0x000007ffffffffff, 0x00000fffffffffff,
	0x00001fffffffffff, 0x00003fffffffffff, 0x00007fffffffffff, 0x0000ffffffffffff,
	0x0001ffffffffffff, 0x0003ffffffffffff, 0x0007ffffffffffff, 0x000fffffffffffff,
	0x001fffffffffffff, 0x003fffffffffffff, 0x007fffffffffffff, 0x00ffffffffffffff,
	0x01ffffffffffffff, 0x03ffffffffffffff, 0x07ffffffffffffff, 0x0fffffffffffffff,
	0x1fffffffffffffff, 0x3fffffffffffffff, 0x7fffffffffffffff, 0xffffffffffffffff
#endif
};
213 | ||
/*
 * Per-level set callback: set up to data->nr_bits contiguous bits in the
 * word at 'offset', starting at in-word position 'bit'.
 *
 * Returns non-zero to stop the walk: either the whole requested region
 * was already set (legal only when data->fail_ok), or this word did not
 * become completely full, so no summary bit above needs setting.
 */
static int axmap_set_fn(struct axmap_level *al, unsigned long offset,
			unsigned int bit, void *__data)
{
	struct axmap_set_data *data = __data;
	unsigned long mask, overlap;
	unsigned int nr_bits;

	/* Clamp the request to what fits in this word from 'bit' onward */
	nr_bits = min(data->nr_bits, BLOCKS_PER_UNIT - bit);

	mask = bit_masks[nr_bits] << bit;

	/*
	 * Mask off any potential overlap, only sets contig regions
	 */
	overlap = al->map[offset] & mask;
	if (overlap == mask) {
		/* Fully overlapped: only tolerated when the caller said so */
		assert(data->fail_ok);
		return 1;
	}

	/*
	 * Strip one mask bit per already-set bit, so the count below stays
	 * consistent. NOTE(review): ffz(~overlap) picks the lowest set bit
	 * of the overlap, but the bit removed from 'mask' at that position
	 * need not leave a single contiguous run -- confirm callers only
	 * depend on the returned count, not on which bits get set.
	 */
	while (overlap) {
		unsigned long clear_mask = ~(1UL << ffz(~overlap));

		mask &= clear_mask;
		overlap &= clear_mask;
		nr_bits--;
	}

	assert(mask);
	assert(!(al->map[offset] & mask));

	al->map[offset] |= mask;

	/* Only level 0 reports how many real blocks were marked */
	if (!al->level)
		data->set_bits = nr_bits;

	/* Levels above 0 only ever set a single summary bit */
	data->nr_bits = 1;

	/* Continue upward only if this word is now completely full */
	return al->map[offset] != -1UL;
}
253 | ||
254 | static void __axmap_set(struct axmap *axmap, uint64_t bit_nr, | |
255 | struct axmap_set_data *data) | |
256 | { | |
257 | unsigned int set_bits, nr_bits = data->nr_bits; | |
258 | ||
259 | if (axmap->first_free >= bit_nr && | |
260 | axmap->first_free < bit_nr + data->nr_bits) | |
261 | axmap->first_free = -1ULL; | |
262 | ||
263 | set_bits = 0; | |
264 | while (nr_bits) { | |
265 | axmap_handler(axmap, bit_nr, axmap_set_fn, data); | |
266 | set_bits += data->set_bits; | |
267 | ||
268 | if (data->set_bits != (BLOCKS_PER_UNIT - nr_bits)) | |
269 | break; | |
270 | ||
271 | nr_bits -= data->set_bits; | |
272 | bit_nr += data->set_bits; | |
273 | ||
274 | data->nr_bits = nr_bits; | |
275 | data->fail_ok = 1; | |
276 | } | |
277 | ||
278 | data->set_bits = set_bits; | |
279 | } | |
280 | ||
281 | void axmap_set(struct axmap *axmap, uint64_t bit_nr) | |
282 | { | |
283 | struct axmap_set_data data = { .nr_bits = 1, }; | |
284 | ||
285 | __axmap_set(axmap, bit_nr, &data); | |
286 | } | |
287 | ||
288 | unsigned int axmap_set_nr(struct axmap *axmap, uint64_t bit_nr, unsigned int nr_bits) | |
289 | { | |
290 | struct axmap_set_data data = { .nr_bits = nr_bits, }; | |
291 | ||
292 | __axmap_set(axmap, bit_nr, &data); | |
293 | return data.set_bits; | |
294 | } | |
295 | ||
296 | static int axmap_isset_fn(struct axmap_level *al, unsigned long offset, | |
297 | unsigned int bit, void *unused) | |
298 | { | |
299 | return (al->map[offset] & (1UL << bit)) != 0; | |
300 | } | |
301 | ||
302 | int axmap_isset(struct axmap *axmap, uint64_t bit_nr) | |
303 | { | |
304 | return axmap_handler_topdown(axmap, bit_nr, axmap_isset_fn, NULL); | |
305 | } | |
306 | ||
/*
 * Locate the first free (clear) bit, starting the search at word
 * 'index' of level 'level' and descending to level 0. The free bit
 * found at each level selects which word to inspect at the level below.
 *
 * Returns the level-0 bit number, or -1ULL when the search runs off the
 * end of a level (map full past 'index').
 */
static uint64_t axmap_find_first_free(struct axmap *axmap, unsigned int level,
				      uint64_t index)
{
	unsigned long j;
	int i;

	/*
	 * Start at the bottom, then converge towards first free bit at the top
	 */
	for (i = level; i >= 0; i--) {
		struct axmap_level *al = &axmap->levels[i];

		/* Ran past the end of this level: nothing free from here */
		if (index >= al->map_size) {
			index = -1ULL;
			break;
		}

		for (j = index; j < al->map_size; j++) {
			if (al->map[j] == -1UL)
				continue;

			/*
			 * First free bit here is our index into the first
			 * free bit at the next higher level
			 */
			index = (j << UNIT_SHIFT) + ffz(al->map[j]);
			break;
		}
		/*
		 * NOTE(review): if the scan above finds only full words,
		 * 'index' keeps its previous-level value and the descent
		 * continues with it -- confirm this cannot yield a stale
		 * (already-set) index rather than -1ULL on a full tail.
		 */
	}

	return index;
}
339 | ||
340 | uint64_t axmap_first_free(struct axmap *axmap) | |
341 | { | |
342 | if (firstfree_valid(axmap)) | |
343 | return axmap->first_free; | |
344 | ||
345 | axmap->first_free = axmap_find_first_free(axmap, axmap->nr_levels - 1, 0); | |
346 | return axmap->first_free; | |
347 | } | |
348 | ||
/*
 * State for the next-free walk (axmap_next_free_fn).
 */
struct axmap_next_free_data {
	unsigned int level;	/* level at which a candidate word was found */
	unsigned long offset;	/* word offset within that level */
	uint64_t bit;		/* search position, rescaled per level */
};
354 | ||
355 | static int axmap_next_free_fn(struct axmap_level *al, unsigned long offset, | |
356 | unsigned int bit, void *__data) | |
357 | { | |
358 | struct axmap_next_free_data *data = __data; | |
359 | uint64_t mask = ~((1UL << ((data->bit & BLOCKS_PER_UNIT_MASK) + 1)) - 1); | |
360 | ||
361 | if (!(mask & al->map[offset])) | |
362 | return 0; | |
363 | ||
364 | if (al->map[offset] != -1UL) { | |
365 | data->level = al->level; | |
366 | data->offset = offset; | |
367 | return 1; | |
368 | } | |
369 | ||
370 | data->bit = (data->bit + BLOCKS_PER_UNIT - 1) / BLOCKS_PER_UNIT; | |
371 | return 0; | |
372 | } | |
373 | ||
374 | /* | |
375 | * 'bit_nr' is already set. Find the next free bit after this one. | |
376 | */ | |
377 | uint64_t axmap_next_free(struct axmap *axmap, uint64_t bit_nr) | |
378 | { | |
379 | struct axmap_next_free_data data = { .level = -1U, .bit = bit_nr, }; | |
380 | ||
381 | if (firstfree_valid(axmap) && bit_nr < axmap->first_free) | |
382 | return axmap->first_free; | |
383 | ||
384 | if (!axmap_handler(axmap, bit_nr, axmap_next_free_fn, &data)) | |
385 | return axmap_first_free(axmap); | |
386 | ||
387 | assert(data.level != -1U); | |
388 | ||
389 | return axmap_find_first_free(axmap, data.level, data.offset); | |
390 | } |