smalloc: turn on the thread safe flag
[fio.git] / smalloc.c
/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
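
/*
 * Illustrative usage sketch (not part of this file): call sinit() before
 * the first allocation, allocate and free with smalloc()/sfree(), and tear
 * everything down with scleanup():
 *
 *	sinit();
 *	void *p = smalloc(128);
 *	char *s = smalloc_strdup("shared string");
 *	...
 *	sfree(s);
 *	sfree(p);
 *	scleanup();
 */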
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <limits.h>

#include "mutex.h"

#define MP_SAFE                 /* define to make thread safe */
#define SMALLOC_REDZONE         /* define to detect memory corruption */

#define SMALLOC_BPB     32      /* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI     (sizeof(unsigned int) * 8)
#define SMALLOC_BPL     (SMALLOC_BPB * SMALLOC_BPI)
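
/*
 * Block geometry, assuming the usual 32-bit unsigned int: each bitmap bit
 * covers SMALLOC_BPB = 32 bytes, each bitmap word holds SMALLOC_BPI = 32
 * bits, so one bitmap word tracks SMALLOC_BPL = 32 * 32 = 1024 bytes of
 * pool space.
 */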

#define INITIAL_SIZE    1024*1024       /* new pool size */
#define MAX_POOLS       4               /* maximum number of pools to setup */

#define SMALLOC_PRE_RED         0xdeadbeefU
#define SMALLOC_POST_RED        0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;

struct pool {
        struct fio_mutex *lock;         /* protects this pool */
        void *map;                      /* map of blocks */
        unsigned int *bitmap;           /* blocks free/busy map */
        unsigned int free_blocks;       /* free blocks */
        unsigned int nr_blocks;         /* total number of bitmap words */
        unsigned int next_non_full;     /* first bitmap word worth scanning */
        int fd;                         /* memory backing fd */
        char file[PATH_MAX];            /* filename for fd */
        unsigned int mmap_size;         /* total size of the mapping */
};

struct block_hdr {
        unsigned int size;              /* total allocation size, header included */
#ifdef SMALLOC_REDZONE
        unsigned int prered;            /* leading redzone marker */
#endif
};

static struct pool mp[MAX_POOLS];       /* the pools themselves */
static unsigned int nr_pools;           /* number of pools set up */
static unsigned int last_pool;          /* pool that last satisfied an allocation */
static struct fio_mutex *lock;          /* rw lock protecting the pool list */

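/*
 * Locking: the global rw lock guards the pool array (readers walk it,
 * add_pool() takes it for writing to bump nr_pools), while each pool has
 * its own mutex protecting its bitmap and counters. Both are only set up
 * when MP_SAFE is defined; otherwise the helpers below are no-ops.
 */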
static inline void pool_lock(struct pool *pool)
{
        if (pool->lock)
                fio_mutex_down(pool->lock);
}

static inline void pool_unlock(struct pool *pool)
{
        if (pool->lock)
                fio_mutex_up(pool->lock);
}

static inline void global_read_lock(void)
{
        if (lock)
                fio_mutex_down_read(lock);
}

static inline void global_read_unlock(void)
{
        if (lock)
                fio_mutex_up_read(lock);
}

static inline void global_write_lock(void)
{
        if (lock)
                fio_mutex_down_write(lock);
}

static inline void global_write_unlock(void)
{
        if (lock)
                fio_mutex_up_write(lock);
}

static inline int ptr_valid(struct pool *pool, void *ptr)
{
        unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

        return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

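/*
 * Walk nr_blocks bitmap bits starting at bit idx of *map, applying func()
 * one word at a time with a mask covering the bits that fall in that word.
 * Stops and returns 0 as soon as func() does; returns 1 otherwise.
 */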
static int blocks_iter(unsigned int *map, unsigned int idx,
                       unsigned int nr_blocks,
                       int (*func)(unsigned int *map, unsigned int mask))
{
        while (nr_blocks) {
                unsigned int this_blocks, mask;

                this_blocks = nr_blocks;
                if (this_blocks + idx > SMALLOC_BPI) {
                        this_blocks = SMALLOC_BPI - idx;
                        idx = SMALLOC_BPI - this_blocks;
                }

                if (this_blocks == SMALLOC_BPI)
                        mask = -1U;
                else
                        mask = ((1U << this_blocks) - 1) << idx;

                if (!func(map, mask))
                        return 0;

                nr_blocks -= this_blocks;
                idx = 0;
                map++;
        }

        return 1;
}

static int mask_cmp(unsigned int *map, unsigned int mask)
{
        return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
        *map &= ~mask;
        return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
        *map |= mask;
        return 1;
}

static int blocks_free(unsigned int *map, unsigned int idx,
                       unsigned int nr_blocks)
{
        return blocks_iter(map, idx, nr_blocks, mask_cmp);
}

static void set_blocks(unsigned int *map, unsigned int idx,
                       unsigned int nr_blocks)
{
        blocks_iter(map, idx, nr_blocks, mask_set);
}

static void clear_blocks(unsigned int *map, unsigned int idx,
                         unsigned int nr_blocks)
{
        blocks_iter(map, idx, nr_blocks, mask_clear);
}

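/*
 * __ffs() returns the bit index of the least significant set bit in word;
 * find_next_zero() uses it on the inverted word to locate the first clear
 * bit above bit 'start'. Callers guarantee the word is not all ones.
 */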
static inline int __ffs(int word)
{
        int r = 0;

        if (!(word & 0xffff)) {
                word >>= 16;
                r += 16;
        }
        if (!(word & 0xff)) {
                word >>= 8;
                r += 8;
        }
        if (!(word & 0xf)) {
                word >>= 4;
                r += 4;
        }
        if (!(word & 3)) {
                word >>= 2;
                r += 2;
        }
        if (!(word & 1)) {
                word >>= 1;
                r += 1;
        }

        return r;
}

static int find_next_zero(int word, int start)
{
        assert(word != -1U);
        word >>= (start + 1);
        return __ffs(~word) + start + 1;
}

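/*
 * Set up a new pool: back it with an mkstemp() file under /tmp, size it to
 * at least INITIAL_SIZE rounded up to whole bitmap words, mmap() it shared,
 * and place the free/busy bitmap right after the block area in the same
 * mapping.
 */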
static int add_pool(struct pool *pool, unsigned int alloc_size)
{
        void *ptr;
        int fd, bitmap_blocks;

        printf("add pool %u\n", alloc_size);

        strcpy(pool->file, "/tmp/.fio_smalloc.XXXXXX");
        fd = mkstemp(pool->file);
        if (fd < 0)
                goto out_close;

#ifdef SMALLOC_REDZONE
        alloc_size += sizeof(unsigned int);
#endif
        alloc_size += sizeof(struct block_hdr);
        if (alloc_size < INITIAL_SIZE)
                alloc_size = INITIAL_SIZE;

        /* round up to nearest full number of blocks */
        alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
        bitmap_blocks = alloc_size / SMALLOC_BPL;
        alloc_size += bitmap_blocks * sizeof(unsigned int);
        pool->mmap_size = alloc_size;

        pool->nr_blocks = bitmap_blocks;
        pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

        if (ftruncate(fd, alloc_size) < 0)
                goto out_unlink;

        ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED)
                goto out_unlink;

        memset(ptr, 0, alloc_size);
        pool->map = ptr;
        pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

#ifdef MP_SAFE
        pool->lock = fio_mutex_init(1);
        if (!pool->lock)
                goto out_unlink;
#endif

        pool->fd = fd;

        global_write_lock();
        nr_pools++;
        global_write_unlock();
        return 0;
out_unlink:
        fprintf(stderr, "smalloc: failed adding pool\n");
        if (pool->map)
                munmap(pool->map, pool->mmap_size);
        unlink(pool->file);
out_close:
        if (fd >= 0)
                close(fd);
        return 1;
}

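/*
 * One-time setup: create the global rw lock (when MP_SAFE is defined) and
 * the first pool. Further pools are added on demand by smalloc().
 */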
void sinit(void)
{
        int ret;

#ifdef MP_SAFE
        lock = fio_mutex_rw_init();
#endif
        ret = add_pool(&mp[0], INITIAL_SIZE);
        assert(!ret);
}

static void cleanup_pool(struct pool *pool)
{
        unlink(pool->file);
        close(pool->fd);
        munmap(pool->map, pool->mmap_size);

        if (pool->lock)
                fio_mutex_remove(pool->lock);
}

void scleanup(void)
{
        unsigned int i;

        for (i = 0; i < nr_pools; i++)
                cleanup_pool(&mp[i]);

        if (lock)
                fio_mutex_remove(lock);
}

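/*
 * Redzone layout (when SMALLOC_REDZONE is defined): hdr->prered sits in the
 * block header just ahead of the user region, and the post redzone occupies
 * the last sizeof(unsigned int) bytes of the allocation:
 *
 *	[ block_hdr: size, prered ][ user data ... ][ postred ]
 *
 * sfree_check_redzone() verifies both markers on free and asserts if either
 * has been overwritten.
 */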
static void fill_redzone(struct block_hdr *hdr)
{
#ifdef SMALLOC_REDZONE
        unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);

        hdr->prered = SMALLOC_PRE_RED;
        *postred = SMALLOC_POST_RED;
#endif
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
#ifdef SMALLOC_REDZONE
        unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);

        if (hdr->prered != SMALLOC_PRE_RED) {
                fprintf(stderr, "smalloc pre redzone destroyed!\n");
                fprintf(stderr, " ptr=%p, prered=%x, expected %x\n",
                                hdr, hdr->prered, SMALLOC_PRE_RED);
                assert(0);
        }
        if (*postred != SMALLOC_POST_RED) {
                fprintf(stderr, "smalloc post redzone destroyed!\n");
                fprintf(stderr, " ptr=%p, postred=%x, expected %x\n",
                                hdr, *postred, SMALLOC_POST_RED);
                assert(0);
        }
#endif
}

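/*
 * Free one allocation back into its pool: step back to the block header,
 * verify the redzones, then clear the corresponding bitmap bits under the
 * pool lock and return the blocks to the free count.
 */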
static void sfree_pool(struct pool *pool, void *ptr)
{
        struct block_hdr *hdr;
        unsigned int nr_blocks, i, idx;
        unsigned long offset;

        if (!ptr)
                return;

        ptr -= sizeof(*hdr);
        hdr = ptr;

        assert(ptr_valid(pool, ptr));

        nr_blocks = (hdr->size + SMALLOC_BPB - 1) / SMALLOC_BPB;
        sfree_check_redzone(hdr);

        offset = ptr - pool->map;
        i = offset / SMALLOC_BPL;
        idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

        pool_lock(pool);
        clear_blocks(&pool->bitmap[i], idx, nr_blocks);
        if (i < pool->next_non_full)
                pool->next_non_full = i;
        pool->free_blocks += nr_blocks;
        pool_unlock(pool);
}

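/*
 * Public free: find the owning pool under the global read lock (the pointer
 * must belong to one of the pools), then hand it to sfree_pool().
 */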
void sfree(void *ptr)
{
        struct pool *pool = NULL;
        unsigned int i;

        if (!ptr)
                return;

        global_read_lock();

        for (i = 0; i < nr_pools; i++) {
                if (ptr_valid(&mp[i], ptr)) {
                        pool = &mp[i];
                        break;
                }
        }

        global_read_unlock();

        assert(pool);
        sfree_pool(pool, ptr);
}

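/*
 * Carve the requested size (rounded up to whole blocks) out of one pool:
 * starting at next_non_full, scan the bitmap for a long enough run of clear
 * bits, mark it busy and return a pointer at the matching offset into the
 * pool mapping. Returns NULL if the pool cannot satisfy the request.
 */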
static void *__smalloc_pool(struct pool *pool, unsigned int size)
{
        unsigned int nr_blocks;
        unsigned int i;
        unsigned int offset;
        unsigned int last_idx;
        void *ret = NULL;

        nr_blocks = (size + SMALLOC_BPB - 1) / SMALLOC_BPB;

        pool_lock(pool);
        if (nr_blocks > pool->free_blocks)
                goto fail;

        i = pool->next_non_full;
        last_idx = 0;
        offset = -1U;
        while (i < pool->nr_blocks) {
                unsigned int idx;

                if (pool->bitmap[i] == -1U) {
                        i++;
                        pool->next_non_full = i;
                        last_idx = 0;
                        continue;
                }

                idx = find_next_zero(pool->bitmap[i], last_idx);
                if (!blocks_free(&pool->bitmap[i], idx, nr_blocks)) {
                        idx += nr_blocks;
                        if (idx < SMALLOC_BPI)
                                last_idx = idx;
                        else {
                                last_idx = 0;
                                while (idx >= SMALLOC_BPI) {
                                        i++;
                                        idx -= SMALLOC_BPI;
                                }
                        }
                        continue;
                }
                set_blocks(&pool->bitmap[i], idx, nr_blocks);
                offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
                break;
        }

        if (i < pool->nr_blocks) {
                pool->free_blocks -= nr_blocks;
                ret = pool->map + offset;
        }
fail:
        pool_unlock(pool);
        return ret;
}

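/*
 * Allocate from a given pool: grow the request by the block header (plus
 * the post redzone when enabled), stash the total size in the header, and
 * hand back a zeroed user pointer just past the header.
 */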
static void *smalloc_pool(struct pool *pool, unsigned int size)
{
        struct block_hdr *hdr;
        unsigned int alloc_size;
        void *ptr;

        alloc_size = size + sizeof(*hdr);
#ifdef SMALLOC_REDZONE
        alloc_size += sizeof(unsigned int);
#endif

        ptr = __smalloc_pool(pool, alloc_size);
        if (!ptr) {
                printf("failed allocating %u\n", alloc_size);
                return NULL;
        }

        hdr = ptr;
        hdr->size = alloc_size;
        ptr += sizeof(*hdr);

        fill_redzone(hdr);

        memset(ptr, 0, size);
        return ptr;
}

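/*
 * Public allocation entry point: try the pool that last succeeded, then the
 * remaining pools, and finally grow by adding a new pool until MAX_POOLS is
 * reached. Returns NULL when no pool can satisfy the request.
 */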
void *smalloc(unsigned int size)
{
        unsigned int i;

        global_read_lock();
        i = last_pool;

        do {
                for (; i < nr_pools; i++) {
                        void *ptr = smalloc_pool(&mp[i], size);

                        if (ptr) {
                                last_pool = i;
                                global_read_unlock();
                                return ptr;
                        }
                }
                if (last_pool) {
                        /* rescan from the first pool before growing */
                        last_pool = i = 0;
                        continue;
                }

                if (nr_pools + 1 > MAX_POOLS)
                        break;
                else {
                        i = nr_pools;
                        global_read_unlock();
                        if (add_pool(&mp[nr_pools], size))
                                goto out;
                        global_read_lock();
                }
        } while (1);

        global_read_unlock();
out:
        return NULL;
}

char *smalloc_strdup(const char *str)
{
        char *ptr;

        ptr = smalloc(strlen(str) + 1);
        if (ptr)
                strcpy(ptr, str);
        return ptr;
}