Fix bug in smalloc size calculation
[fio.git] / smalloc.c
... / ...
CommitLineData
1/*
2 * simple memory allocator, backed by mmap() so that it hands out memory
3 * that can be shared across processes and threads
4 */
5#include <sys/mman.h>
6#include <stdio.h>
7#include <stdlib.h>
8#include <assert.h>
9#include <string.h>
10#include <unistd.h>
11#include <sys/types.h>
12#include <limits.h>
13#include <fcntl.h>
14
15#include "mutex.h"
16#include "arch/arch.h"
17#include "os/os.h"
18
#define SMALLOC_REDZONE /* define to detect memory corruption */

#define SMALLOC_BPB 32 /* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI (sizeof(unsigned int) * 8) /* bits per bitmap word */
#define SMALLOC_BPL (SMALLOC_BPB * SMALLOC_BPI) /* bytes covered by one bitmap word */

#define INITIAL_SIZE 8192*1024 /* new pool size */
#define MAX_POOLS 128 /* maximum number of pools to setup */

/* guard words stamped before/after each allocation when redzoning */
#define SMALLOC_PRE_RED 0xdeadbeefU
#define SMALLOC_POST_RED 0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
const int int_mask = sizeof(int) - 1; /* mask for rounding to int alignment */
33
/*
 * One mmap()-ed shared memory pool. The mapping holds the block area,
 * carved into SMALLOC_BPB-byte blocks, followed by the free/busy bitmap.
 */
struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	unsigned int free_blocks;		/* free blocks */
	unsigned int nr_blocks;			/* total blocks */
	unsigned int next_non_full;		/* scan hint: first bitmap word that may have a free bit */
	unsigned int mmap_size;			/* total bytes mapped, incl. bitmap */
};
43
/* header prepended to every allocation handed out by smalloc() */
struct block_hdr {
	unsigned int size;	/* total allocation size incl. header (and redzones) */
#ifdef SMALLOC_REDZONE
	unsigned int prered;	/* pre-redzone guard word, checked on free */
#endif
};

static struct pool mp[MAX_POOLS];	/* pool table */
static unsigned int nr_pools;		/* number of pools currently set up */
static unsigned int last_pool;		/* pool that satisfied the last allocation */
static struct fio_mutex *lock;		/* rw lock guarding the pool table */
55
/*
 * Locking helpers: each pool has its own mutex, while the pool table
 * itself (mp[]/nr_pools) is guarded by a global rw lock.
 */
static inline void pool_lock(struct pool *pool)
{
	fio_mutex_down(pool->lock);
}

static inline void pool_unlock(struct pool *pool)
{
	fio_mutex_up(pool->lock);
}

/* shared (read) side of the global pool-table lock */
static inline void global_read_lock(void)
{
	fio_mutex_down_read(lock);
}

static inline void global_read_unlock(void)
{
	fio_mutex_up_read(lock);
}

/* exclusive (write) side of the global pool-table lock */
static inline void global_write_lock(void)
{
	fio_mutex_down_write(lock);
}

static inline void global_write_unlock(void)
{
	fio_mutex_up_write(lock);
}
85
86static inline int ptr_valid(struct pool *pool, void *ptr)
87{
88 unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;
89
90 return (ptr >= pool->map) && (ptr < pool->map + pool_size);
91}
92
93static inline unsigned int size_to_blocks(unsigned int size)
94{
95 return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
96}
97
/*
 * Walk 'nr_blocks' bitmap bits starting at word 'pool_idx', bit 'idx',
 * applying 'func' to each bitmap word with a mask covering the bits
 * that fall inside that word. Returns 0 if 'func' returns 0 or the
 * range runs past the end of the pool, 1 when all blocks were handled.
 */
static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{

	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		/* clamp to the bits remaining in this bitmap word */
		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			/* NOTE(review): leaves idx unchanged, BPI-(BPI-idx) == idx */
			idx = SMALLOC_BPI - this_blocks;
		}

		/* build a mask of 'this_blocks' bits starting at bit 'idx' */
		if (this_blocks == SMALLOC_BPI)
			mask = -1U;	/* 1U << 32 would be undefined */
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;	/* later words always start at bit 0 */
		pool_idx++;
	}

	return 1;
}
133
/*
 * Bitmap callback: returns 1 when none of the bits in 'mask' are set
 * in *map, ie all covered blocks are free.
 */
static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return (*map & mask) == 0;
}
138
/*
 * Bitmap callback: clears the bits in 'mask'. All of them must
 * currently be set (blocks being freed were busy). Always returns 1
 * so iteration continues.
 */
static int mask_clear(unsigned int *map, unsigned int mask)
{
	unsigned int word = *map;

	assert((word & mask) == mask);
	*map = word & ~mask;
	return 1;
}
145
/*
 * Bitmap callback: sets the bits in 'mask'. None of them may currently
 * be set (blocks being claimed were free). Always returns 1 so
 * iteration continues.
 */
static int mask_set(unsigned int *map, unsigned int mask)
{
	unsigned int word = *map;

	assert((word & mask) == 0);
	*map = word | mask;
	return 1;
}
152
/* true if all 'nr_blocks' starting at (pool_idx, idx) are free */
static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

/* mark 'nr_blocks' busy; asserts they were free */
static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

/* mark 'nr_blocks' free; asserts they were busy */
static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}
170
/*
 * Return the index of the first zero bit in 'word' at or after 'start'.
 * Caller guarantees such a bit exists (word != -1U).
 *
 * NOTE(review): 'word' is signed, so 'word >>= start' is
 * implementation-defined for negative values; on the usual arithmetic
 * shift, vacated high bits fill with ones, which is benign here since
 * high set bits only mean "busy" — confirm on unusual targets.
 */
static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= start;
	return ffz(word) + start;
}
177
/*
 * Set up a new pool large enough to satisfy at least one allocation of
 * 'alloc_size' user bytes. The mapping holds the block area followed by
 * the free/busy bitmap. Returns 0 on success, 1 on failure.
 */
static int add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	void *ptr;

	/* account for per-allocation overhead when sizing the pool */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	/* one bitmap word per SMALLOC_BPL bytes of block area */
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

	/* shared anonymous mapping: forked children see the same memory */
	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE,
			MAP_SHARED | OS_MAP_ANON, -1, 0);
	if (ptr == MAP_FAILED)
		goto out_fail;

	memset(ptr, 0, alloc_size);
	pool->map = ptr;
	/* bitmap lives directly after the block area */
	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

	pool->lock = fio_mutex_init(1);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return 0;
out_fail:
	fprintf(stderr, "smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return 1;
}
220
/* one-time init: create the global rw lock and the first pool */
void sinit(void)
{
	int ret;

	lock = fio_mutex_rw_init();
	ret = add_pool(&mp[0], INITIAL_SIZE);
	assert(!ret);
}
229
/* release one pool's mapping and mutex */
static void cleanup_pool(struct pool *pool)
{
	/*
	 * NOTE(review): the mapping is anonymous (OS_MAP_ANON in
	 * add_pool()), so despite the historical comment about a
	 * temporary backing file, unmapping is all that is needed here.
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}
241
/* tear down every pool and the global pool-table lock */
void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	if (lock)
		fio_mutex_remove(lock);
}
252
#ifdef SMALLOC_REDZONE
/*
 * Locate the post redzone: the last int-sized word of the allocation.
 * hdr->size is rounded to int alignment in smalloc_pool(), so the
 * alignment fixup below is normally a no-op.
 */
static void *postred_ptr(struct block_hdr *hdr)
{
	unsigned long ptr;

	ptr = (unsigned long) hdr + hdr->size - sizeof(unsigned int);
	ptr = (ptr + int_mask) & ~int_mask;

	return (void *) ptr;
}

/* stamp the guard words before and after the user area */
static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

/* verify both guard words on free; abort loudly on corruption */
static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		fprintf(stderr, "smalloc pre redzone destroyed!\n");
		fprintf(stderr, " ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		fprintf(stderr, "smalloc post redzone destroyed!\n");
		fprintf(stderr, " ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
/* redzone checking compiled out: no-op stubs */
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif
298
/*
 * Return 'ptr', previously handed out from this pool, to the pool's
 * free-block bitmap. NULL is a no-op.
 */
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	/* step back to the block header that precedes the user area */
	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	/* translate pool offset into bitmap word (i) and bit index (idx) */
	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;	/* freed space before the scan hint */
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}
326
/*
 * Free memory previously returned by smalloc(). NULL is a no-op;
 * a pointer that belongs to no known pool trips the assert.
 */
void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	global_read_lock();

	/* find the pool this pointer came from */
	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	global_read_unlock();

	assert(pool);
	sfree_pool(pool, ptr);
}
349
/*
 * Scan the pool bitmap for a contiguous run of free blocks big enough
 * for 'size' bytes, mark the run busy and return a pointer into the
 * pool's block area. Returns NULL if no fit is found.
 */
static void *__smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;	/* cannot possibly fit */

	i = pool->next_non_full;	/* skip words known to be full */
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			/* word completely busy: advance hint and move on */
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			/* run blocked; resume the search past the candidate */
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	/* i < nr_blocks means the break above was taken and offset is set */
	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}
404
/*
 * Allocate 'size' user bytes from 'pool': adds room for the block
 * header (and redzones), claims the blocks and returns a zeroed user
 * pointer, or NULL if this pool cannot satisfy the request.
 */
static void *smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;	/* total size, consumed by sfree_pool() */
		fill_redzone(hdr);

		/* hand out the area past the header, zero-filled */
		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}
432
433void *smalloc(unsigned int size)
434{
435 unsigned int i;
436
437 global_write_lock();
438 i = last_pool;
439
440 do {
441 for (; i < nr_pools; i++) {
442 void *ptr = smalloc_pool(&mp[i], size);
443
444 if (ptr) {
445 last_pool = i;
446 global_write_unlock();
447 return ptr;
448 }
449 }
450 if (last_pool) {
451 last_pool = 0;
452 continue;
453 }
454
455 if (nr_pools + 1 > MAX_POOLS)
456 break;
457 else {
458 i = nr_pools;
459 if (add_pool(&mp[nr_pools], size))
460 goto out;
461 }
462 } while (1);
463
464out:
465 global_write_unlock();
466 return NULL;
467}
468
/*
 * Duplicate 'str' into shared memory. Returns the copy, or NULL if the
 * allocation fails (the previous version passed an unchecked smalloc()
 * result straight to strcpy(), crashing on allocation failure).
 */
char *smalloc_strdup(const char *str)
{
	size_t len = strlen(str) + 1;
	char *ptr;

	ptr = smalloc(len);
	if (ptr)
		memcpy(ptr, str, len);

	return ptr;
}