Make smalloc redzone pointer properly aligned
[fio.git] / smalloc.c
CommitLineData
d24c33a4
JA
1/*
2 * simple memory allocator, backed by mmap() so that it hands out memory
3 * that can be shared across processes and threads
4 */
5#include <sys/mman.h>
6#include <stdio.h>
7#include <stdlib.h>
8#include <assert.h>
9#include <string.h>
10#include <unistd.h>
11#include <sys/types.h>
12#include <limits.h>
13
6548f47f 14#include "mutex.h"
b3268b92 15#include "arch/arch.h"
d24c33a4 16
55f6491d 17#define SMALLOC_REDZONE /* define to detect memory corruption */
d24c33a4 18
ec996e9c
JA
19#define SMALLOC_BPB 32 /* block size, bytes-per-bit in bitmap */
20#define SMALLOC_BPI (sizeof(unsigned int) * 8)
21#define SMALLOC_BPL (SMALLOC_BPB * SMALLOC_BPI)
22
23#define INITIAL_SIZE 1024*1024 /* new pool size */
68857686 24#define MAX_POOLS 128 /* maximum number of pools to setup */
d24c33a4 25
55f6491d
JA
26#define SMALLOC_PRE_RED 0xdeadbeefU
27#define SMALLOC_POST_RED 0x5aa55aa5U
55f6491d 28
2b386d25
JA
/*
 * Exported pool-size knob.
 * NOTE(review): pools in this file are sized from INITIAL_SIZE directly
 * (see add_pool()/sinit()); confirm callers elsewhere consume this.
 */
unsigned int smalloc_pool_size = INITIAL_SIZE;

/*
 * One mmap()-backed arena. The backing is a MAP_SHARED temp file, so
 * memory handed out from a pool is visible across processes/threads.
 */
struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	unsigned int free_blocks;		/* free blocks */
	unsigned int nr_blocks;			/* number of 32-bit bitmap words (see add_pool()) */
	unsigned int next_non_full;		/* lowest bitmap word that may still have free bits */
	int fd;					/* memory backing fd */
	unsigned int mmap_size;			/* total bytes mapped, data area plus bitmap */
};
41
/*
 * Header prepended to every allocation. smalloc_pool() records the
 * gross size here so sfree_pool() knows how many blocks to release.
 */
struct block_hdr {
	unsigned int size;	/* gross allocation size, header included */
#ifdef SMALLOC_REDZONE
	unsigned int prered;	/* leading guard word, set to SMALLOC_PRE_RED */
#endif
};
48
static struct pool mp[MAX_POOLS];	/* pool table; [0, nr_pools) are live */
static unsigned int nr_pools;		/* number of pools set up so far */
static unsigned int last_pool;		/* pool that last satisfied an allocation */
static struct fio_mutex *lock;		/* rw-lock protecting the pool table */
d24c33a4 53
d24c33a4
JA
/* Acquire the per-pool mutex */
static inline void pool_lock(struct pool *pool)
{
	fio_mutex_down(pool->lock);
}
58
/* Release the per-pool mutex */
static inline void pool_unlock(struct pool *pool)
{
	fio_mutex_up(pool->lock);
}
63
/* Shared (reader) side of the global pool-table lock */
static inline void global_read_lock(void)
{
	fio_mutex_down_read(lock);
}
68
/* Drop the shared (reader) side of the global pool-table lock */
static inline void global_read_unlock(void)
{
	fio_mutex_up_read(lock);
}
73
/* Exclusive (writer) side of the global pool-table lock */
static inline void global_write_lock(void)
{
	fio_mutex_down_write(lock);
}
78
/* Drop the exclusive (writer) side of the global pool-table lock */
static inline void global_write_unlock(void)
{
	fio_mutex_up_write(lock);
}
83
d24c33a4
JA
84static inline int ptr_valid(struct pool *pool, void *ptr)
85{
dcb69098 86 unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;
ec996e9c
JA
87
88 return (ptr >= pool->map) && (ptr < pool->map + pool_size);
d24c33a4
JA
89}
90
808e9ea8
JA
91static inline unsigned int size_to_blocks(unsigned int size)
92{
93 return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
94}
95
dcb69098
JA
/*
 * Apply func() to the run of nr_blocks bitmap bits starting at bit
 * 'idx' of bitmap word 'pool_idx', one word-sized mask at a time.
 * Returns 0 if func() rejects a word or the run overflows the bitmap;
 * returns 1 once the whole run has been processed.
 */
static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{

	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		/* clamp this step to the bits remaining in the current word */
		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			/*
			 * NOTE(review): no-op — this_blocks was just set to
			 * SMALLOC_BPI - idx, so idx is assigned its own value.
			 */
			idx = SMALLOC_BPI - this_blocks;
		}

		/* 1U << SMALLOC_BPI would be undefined; special-case full words */
		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;		/* subsequent words start at bit 0 */
		pool_idx++;
	}

	return 1;
}
131
/* Return 1 if every bit of 'bits' is clear (free) in *word, else 0 */
static int mask_cmp(unsigned int *word, unsigned int bits)
{
	return (*word & bits) == 0;
}
136
/*
 * Clear the bits of 'bits' in *word; all of them must currently be
 * set. Always returns 1 so blocks_iter() continues.
 */
static int mask_clear(unsigned int *word, unsigned int bits)
{
	assert((*word & bits) == bits);
	*word = *word & ~bits;
	return 1;
}
143
/*
 * Set the bits of 'bits' in *word; all of them must currently be
 * clear. Always returns 1 so blocks_iter() continues.
 */
static int mask_set(unsigned int *word, unsigned int bits)
{
	assert((*word & bits) == 0);
	*word = *word | bits;
	return 1;
}
150
dcb69098
JA
/* True if all nr_blocks starting at word pool_idx, bit idx are free */
static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}
156
dcb69098
JA
/* Mark nr_blocks starting at word pool_idx, bit idx as busy */
static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}
162
dcb69098
JA
/* Mark nr_blocks starting at word pool_idx, bit idx as free again */
static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}
168
ec996e9c
JA
169static int find_next_zero(int word, int start)
170{
171 assert(word != -1U);
172 word >>= (start + 1);
b3268b92 173 return ffz(word) + start + 1;
d24c33a4
JA
174}
175
adf57099 176static int add_pool(struct pool *pool, unsigned int alloc_size)
d24c33a4 177{
ec996e9c 178 int fd, bitmap_blocks;
b8a6582e
JA
179 char file[] = "/tmp/.fio_smalloc.XXXXXX";
180 void *ptr;
ec996e9c 181
b8a6582e 182 fd = mkstemp(file);
d24c33a4
JA
183 if (fd < 0)
184 goto out_close;
185
55f6491d 186#ifdef SMALLOC_REDZONE
ec996e9c 187 alloc_size += sizeof(unsigned int);
55f6491d 188#endif
ec996e9c
JA
189 alloc_size += sizeof(struct block_hdr);
190 if (alloc_size < INITIAL_SIZE)
191 alloc_size = INITIAL_SIZE;
192
193 /* round up to nearest full number of blocks */
194 alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
195 bitmap_blocks = alloc_size / SMALLOC_BPL;
196 alloc_size += bitmap_blocks * sizeof(unsigned int);
197 pool->mmap_size = alloc_size;
55f6491d 198
ec996e9c
JA
199 pool->nr_blocks = bitmap_blocks;
200 pool->free_blocks = bitmap_blocks * SMALLOC_BPB;
adf57099 201
ec996e9c 202 if (ftruncate(fd, alloc_size) < 0)
d24c33a4
JA
203 goto out_unlink;
204
ec996e9c 205 ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
d24c33a4
JA
206 if (ptr == MAP_FAILED)
207 goto out_unlink;
208
ec996e9c
JA
209 memset(ptr, 0, alloc_size);
210 pool->map = ptr;
211 pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);
d24c33a4 212
6548f47f 213 pool->lock = fio_mutex_init(1);
d24c33a4
JA
214 if (!pool->lock)
215 goto out_unlink;
d24c33a4 216
443bb114
JA
217 /*
218 * Unlink pool file now. It wont get deleted until the fd is closed,
219 * which happens both for cleanup or unexpected quit. This way we
220 * don't leave temp files around in case of a crash.
221 */
b8a6582e 222 unlink(file);
d24c33a4
JA
223 pool->fd = fd;
224
d24c33a4
JA
225 nr_pools++;
226 return 0;
227out_unlink:
ec996e9c 228 fprintf(stderr, "smalloc: failed adding pool\n");
d24c33a4 229 if (pool->map)
ec996e9c 230 munmap(pool->map, pool->mmap_size);
b8a6582e 231 unlink(file);
d24c33a4 232out_close:
b8a6582e 233 close(fd);
d24c33a4
JA
234 return 1;
235}
236
237void sinit(void)
238{
4d4e80f2 239 int ret;
d24c33a4 240
6548f47f 241 lock = fio_mutex_rw_init();
adf57099 242 ret = add_pool(&mp[0], INITIAL_SIZE);
d24c33a4
JA
243 assert(!ret);
244}
245
/* Tear down one pool: backing fd, mapping, then its mutex */
static void cleanup_pool(struct pool *pool)
{
	/*
	 * This will also remove the temporary file we used as a backing
	 * store, it was already unlinked
	 */
	close(pool->fd);
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}
258
259void scleanup(void)
260{
261 unsigned int i;
262
263 for (i = 0; i < nr_pools; i++)
264 cleanup_pool(&mp[i]);
265
6548f47f
JA
266 if (lock)
267 fio_mutex_remove(lock);
d24c33a4
JA
268}
269
#ifdef SMALLOC_REDZONE
/*
 * Address of the trailing guard word: the last sizeof(unsigned int)
 * bytes of the allocation, rounded up to int alignment (smalloc_pool()
 * reserves two ints of slack so this never runs past the block).
 */
static void *postred_ptr(struct block_hdr *hdr)
{
	const int int_mask = sizeof(unsigned int) - 1;
	unsigned long ptr;

	ptr = (unsigned long) hdr + hdr->size - sizeof(unsigned int);
	ptr = (ptr + int_mask) & ~int_mask;

	return (void *) ptr;
}

/* Stamp the guard words on both ends of a fresh allocation */
static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

/*
 * Verify both guard words at free time; aborts with a diagnostic on
 * a buffer underrun (pre) or overrun (post).
 */
static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		fprintf(stderr, "smalloc pre redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		fprintf(stderr, "smalloc post redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
/* Redzone checking disabled: no-op stubs */
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif
316
d24c33a4
JA
/*
 * Release a pointer previously handed out from 'pool': step back to
 * its header, verify the redzones, and clear its span in the bitmap.
 */
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	/* step back to the header smalloc_pool() prepended */
	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	/* translate mapping offset into bitmap word + bit position */
	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	/* keep next_non_full pointing at the lowest word with free bits */
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}
344
345void sfree(void *ptr)
346{
347 struct pool *pool = NULL;
348 unsigned int i;
349
8e5732e5
JA
350 if (!ptr)
351 return;
352
65864cf7 353 global_read_lock();
d24c33a4
JA
354
355 for (i = 0; i < nr_pools; i++) {
356 if (ptr_valid(&mp[i], ptr)) {
357 pool = &mp[i];
358 break;
359 }
360 }
361
65864cf7 362 global_read_unlock();
d24c33a4
JA
363
364 assert(pool);
365 sfree_pool(pool, ptr);
366}
367
/*
 * Scan the pool bitmap for a free run of blocks big enough for 'size'
 * bytes (gross size, header/redzone included), mark it busy and return
 * its address. Returns NULL if the pool cannot satisfy the request.
 */
static void *__smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			/* word fully busy; remember that for future scans */
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			/* run was interrupted; resume just past it */
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	/* i < nr_blocks iff the loop broke out with a claimed run */
	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}
422
55f6491d
JA
423static void *smalloc_pool(struct pool *pool, unsigned int size)
424{
89da54e8 425 unsigned int alloc_size = size + sizeof(struct block_hdr);
55f6491d
JA
426 void *ptr;
427
cf98708d
JA
428 /*
429 * Use twice the size for good luck, we may need to adjust
430 * alignment.
431 */
ec996e9c 432#ifdef SMALLOC_REDZONE
cf98708d 433 alloc_size += 2 * sizeof(unsigned int);
ec996e9c
JA
434#endif
435
436 ptr = __smalloc_pool(pool, alloc_size);
89da54e8
JA
437 if (ptr) {
438 struct block_hdr *hdr = ptr;
55f6491d 439
89da54e8
JA
440 hdr->size = alloc_size;
441 fill_redzone(hdr);
55f6491d 442
89da54e8
JA
443 ptr += sizeof(*hdr);
444 memset(ptr, 0, size);
445 }
ec996e9c 446
55f6491d 447 return ptr;
55f6491d
JA
448}
449
d24c33a4
JA
450void *smalloc(unsigned int size)
451{
452 unsigned int i;
453
d1271dc1 454 global_write_lock();
d24c33a4
JA
455 i = last_pool;
456
457 do {
458 for (; i < nr_pools; i++) {
459 void *ptr = smalloc_pool(&mp[i], size);
460
461 if (ptr) {
462 last_pool = i;
d1271dc1 463 global_write_unlock();
d24c33a4
JA
464 return ptr;
465 }
466 }
467 if (last_pool) {
468 last_pool = 0;
469 continue;
470 }
471
ec996e9c 472 if (nr_pools + 1 > MAX_POOLS)
d24c33a4
JA
473 break;
474 else {
475 i = nr_pools;
adf57099 476 if (add_pool(&mp[nr_pools], size))
65864cf7 477 goto out;
d24c33a4
JA
478 }
479 } while (1);
480
65864cf7 481out:
d1271dc1 482 global_write_unlock();
d24c33a4
JA
483 return NULL;
484}
485
/*
 * Duplicate a NUL-terminated string into smalloc()ed shared memory.
 * Returns NULL if the allocation fails.
 *
 * Fix: the old code called strcpy() without checking the smalloc()
 * result, crashing on allocation failure instead of returning NULL.
 */
char *smalloc_strdup(const char *str)
{
	char *ptr;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}