Improve Valgrind instrumentation of memory allocations
[fio.git] / smalloc.c
/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
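/*
 * Typical lifetime of the allocator, as a rough sketch (fio wires these
 * calls into its own process setup and teardown paths):
 *
 *	sinit();
 *	ptr = smalloc(size);	// memory usable across fork()ed processes
 *	sfree(ptr);
 *	scleanup();
 */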
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <limits.h>
#include <fcntl.h>
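/*
 * Without the Valgrind development headers, stub out the client requests
 * so that the instrumentation below compiles away to no-ops.
 */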
#ifdef CONFIG_VALGRIND_DEV
#include <valgrind/valgrind.h>
#else
#define RUNNING_ON_VALGRIND 0
#define VALGRIND_MALLOCLIKE_BLOCK(addr, size, rzB, is_zeroed) do { } while (0)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do { } while (0)
#endif

#include "fio.h"
#include "fio_sem.h"
#include "arch/arch.h"
#include "os/os.h"
#include "smalloc.h"
#include "log.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

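/*
 * Bitmap geometry: each bitmap bit covers SMALLOC_BPB bytes of pool memory,
 * each bitmap word holds SMALLOC_BPI bits, so a single bitmap word tracks
 * SMALLOC_BPL bytes (1024 with 32-bit ints).
 */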
#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)

#define INITIAL_SIZE	16*1024*1024	/* new pool size */
#define INITIAL_POOLS	8		/* number of pools to set up at init time */

#define MAX_POOLS	16

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
#ifdef SMALLOC_REDZONE
static const int int_mask = sizeof(int) - 1;
#endif

struct pool {
	struct fio_sem *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	size_t free_blocks;			/* free blocks */
	size_t nr_blocks;			/* total blocks */
	size_t next_non_full;
	size_t mmap_size;
};

#ifdef SMALLOC_REDZONE
#define REDZONE_SIZE	sizeof(unsigned int)
#else
#define REDZONE_SIZE	0
#endif

struct block_hdr {
	size_t size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline size_t size_to_blocks(size_t size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}

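/*
 * Apply "func" to "nr_blocks" bitmap bits, starting at bit "idx" of bitmap
 * word "pool_idx". Returns 0 if func fails for any word (or if the range
 * runs past the pool), 1 once all blocks have been visited.
 */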
static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{

	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}

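/*
 * blocks_iter() callbacks: mask_cmp() tests that every block in the mask is
 * free, while mask_set()/mask_clear() mark them busy or free again.
 */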
static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

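/*
 * Return the index of the next zero bit in "word" at or above "start".
 * The caller must ensure the word is not completely full.
 */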
static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= start;
	return ffz(word) + start;
}

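/*
 * Add one pool: an anonymous mapping laid out as the block area
 * (nr_blocks * SMALLOC_BPL bytes) followed by the free/busy bitmap. The
 * requested size is grown to cover header and redzone overhead, then
 * rounded up to a whole number of bitmap words.
 */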
static bool add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	int mmap_flags;
	void *ptr;

	if (nr_pools == MAX_POOLS)
		return false;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

	mmap_flags = OS_MAP_ANON;
#ifdef CONFIG_ESX
	mmap_flags |= MAP_PRIVATE;
#else
	mmap_flags |= MAP_SHARED;
#endif
	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0);

	if (ptr == MAP_FAILED)
		goto out_fail;

	pool->map = ptr;
	pool->bitmap = (unsigned int *)((char *) ptr + (pool->nr_blocks * SMALLOC_BPL));
	memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int));

	pool->lock = fio_sem_init(FIO_SEM_UNLOCKED);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return true;
out_fail:
	log_err("smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return false;
}

void sinit(void)
{
	bool ret;
	int i;

	for (i = 0; i < INITIAL_POOLS; i++) {
		ret = add_pool(&mp[nr_pools], smalloc_pool_size);
		if (!ret)
			break;
	}

	/*
	 * If we added at least one pool, we should be OK for most
	 * cases.
	 */
	assert(i);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * Unmap the anonymous mapping backing this pool; the allocator
	 * no longer uses a temporary file as backing store.
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_sem_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);
}

#ifdef SMALLOC_REDZONE
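/*
 * The post redzone occupies the last naturally aligned unsigned int inside
 * the allocation; smalloc_pool() rounds the allocation size to int
 * alignment, so postred_ptr() can recover its location from the stored size.
 */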
static void *postred_ptr(struct block_hdr *hdr)
{
	uintptr_t ptr;

	ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
	ptr = (uintptr_t) PTR_ALIGN(ptr, int_mask);

	return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	/* Let Valgrind fill the red zones. */
	if (RUNNING_ON_VALGRIND)
		return;

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	/* Let Valgrind check the red zones. */
	if (RUNNING_ON_VALGRIND)
		return;

	if (hdr->prered != SMALLOC_PRE_RED) {
		log_err("smalloc pre redzone destroyed!\n"
			"  ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		log_err("smalloc post redzone destroyed!\n"
			"  ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

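/*
 * Free a block back into its pool: step back to the block header, verify
 * the redzones, then translate the pool offset into a bitmap word index
 * (offset / SMALLOC_BPL) and bit index ((offset % SMALLOC_BPL) / SMALLOC_BPB)
 * and clear the blocks under the pool lock.
 */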
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	fio_sem_down(pool->lock);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	fio_sem_up(pool->lock);
}

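/*
 * Public free: locate the owning pool by address range, tell Valgrind the
 * block is gone, then release the blocks in that pool.
 */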
void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	if (pool) {
		VALGRIND_FREELIKE_BLOCK(ptr, REDZONE_SIZE);
		sfree_pool(pool, ptr);
		return;
	}

	log_err("smalloc: ptr %p not from smalloc pool\n", ptr);
}

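/*
 * First-fit scan of a single pool, under its lock: start at the cached
 * next_non_full word, skip words that are completely allocated, and use
 * find_next_zero()/blocks_free() to locate a free run of nr_blocks blocks.
 */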
static void *__smalloc_pool(struct pool *pool, size_t size)
{
	size_t nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	fio_sem_down(pool->lock);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	fio_sem_up(pool->lock);
	return ret;
}

static void *smalloc_pool(struct pool *pool, size_t size)
{
	size_t alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		fill_redzone(hdr);

		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}

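/*
 * Try pools starting at the one that last satisfied an allocation, wrap
 * around to the pools before it if needed, and remember whichever pool
 * succeeds. The malloclike annotation makes the block known to Valgrind,
 * with REDZONE_SIZE bytes of redzone on either side of it.
 */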
static void *__smalloc(size_t size, bool is_zeroed)
{
	unsigned int i, end_pool;

	if (size != (unsigned int) size)
		return NULL;

	i = last_pool;
	end_pool = nr_pools;

	do {
		for (; i < end_pool; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				VALGRIND_MALLOCLIKE_BLOCK(ptr, size,
							  REDZONE_SIZE,
							  is_zeroed);
				return ptr;
			}
		}
		if (last_pool) {
			end_pool = last_pool;
			last_pool = i = 0;
			continue;
		}

		break;
	} while (1);

	log_err("smalloc: OOM. Consider using --alloc-size to increase the "
		"shared memory available.\n");
	return NULL;
}

void *smalloc(size_t size)
{
	return __smalloc(size, false);
}

void *scalloc(size_t nmemb, size_t size)
{
	return __smalloc(nmemb * size, true);
}

char *smalloc_strdup(const char *str)
{
	char *ptr = NULL;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}