[fio.git] / smalloc.c
/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
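
/*
 * Overview of the public entry points defined below: sinit() maps the
 * initial set of shared pools and scleanup() tears them down again;
 * smalloc()/scalloc() hand out zero-filled memory from those pools,
 * smalloc_strdup() duplicates a string into them, and sfree() returns
 * memory to its pool. A minimal usage sketch (hypothetical caller, not
 * part of this file):
 *
 *	sinit();
 *	struct foo *f = smalloc(sizeof(*f));	// shared, zero-filled
 *	...
 *	sfree(f);
 *	scleanup();
 */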
#include <sys/mman.h>
#include <assert.h>
#include <string.h>

#include "fio.h"
#include "fio_sem.h"
#include "os/os.h"
#include "smalloc.h"
#include "log.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
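
/*
 * Worked example, assuming the common case of a 4-byte unsigned int:
 * SMALLOC_BPI = 4 * 8 = 32 bits per bitmap word, and with 32 bytes per
 * bit that makes SMALLOC_BPL = 32 * 32 = 1024 bytes of pool tracked by
 * each bitmap word.
 */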

#define INITIAL_SIZE	16*1024*1024	/* new pool size */
#define INITIAL_POOLS	8		/* maximum number of pools to setup */

#define MAX_POOLS	16

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
#ifdef SMALLOC_REDZONE
static const int int_mask = sizeof(int) - 1;
#endif

struct pool {
	struct fio_sem *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	size_t free_blocks;			/* free blocks */
	size_t nr_blocks;			/* total blocks */
	size_t next_non_full;
	size_t mmap_size;
};
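
/*
 * Each pool is one anonymous shared mapping of mmap_size bytes: the block
 * area that allocations are carved out of comes first, and the free/busy
 * bitmap is stored at the tail of the same mapping (see add_pool()).
 * Every bit in the bitmap stands for one SMALLOC_BPB-byte block, and
 * next_non_full remembers the lowest bitmap word that may still have free
 * bits, so searches can skip fully used words.
 */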

struct block_hdr {
	size_t size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline size_t size_to_blocks(size_t size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}
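
/*
 * Allocation sizes are rounded up to whole blocks, e.g. with the default
 * SMALLOC_BPB of 32 a request for 100 bytes needs
 * size_to_blocks(100) = (100 + 31) / 32 = 4 blocks (128 bytes).
 */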

static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{

	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}
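
/*
 * blocks_iter() visits the nr_blocks bitmap bits that start at bit 'idx'
 * of bitmap word 'pool_idx', building a word-sized mask for each step and
 * handing it to 'func'. The callbacks below implement the actual
 * operations on top of it: mask_cmp() checks that a range is entirely
 * free, while mask_set()/mask_clear() mark it busy or free again.
 */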

static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= start;
	return ffz(word) + start;
}
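
/*
 * find_next_zero() returns the position of the first clear bit at or
 * above 'start', using the ffz() (find first zero bit) helper that fio's
 * headers provide. For example, a word of 0x7 with start = 0 yields bit
 * 3, the first free block slot in that word.
 */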

static bool add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	int mmap_flags;
	void *ptr;

	if (nr_pools == MAX_POOLS)
		return false;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;
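
	/*
	 * Rough numbers for the default case, assuming a 4-byte unsigned
	 * int: a 16MB pool request, once padded for the header/redzone
	 * bytes and rounded up to a multiple of SMALLOC_BPL (1024), needs
	 * a little over 16K bitmap words, i.e. about 64KB of bitmap
	 * appended to the mapping.
	 */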

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

	mmap_flags = OS_MAP_ANON;
#ifdef CONFIG_ESX
	mmap_flags |= MAP_PRIVATE;
#else
	mmap_flags |= MAP_SHARED;
#endif
	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0);

	if (ptr == MAP_FAILED)
		goto out_fail;

	pool->map = ptr;
	pool->bitmap = (unsigned int *)((char *) ptr + (pool->nr_blocks * SMALLOC_BPL));
	memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int));

	pool->lock = fio_sem_init(FIO_SEM_UNLOCKED);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return true;
out_fail:
	log_err("smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return false;
}

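/*
 * sinit() maps the initial pools up front: INITIAL_POOLS pools of
 * smalloc_pool_size bytes each, i.e. 8 x 16MB of shared memory with the
 * defaults above. smalloc_pool_size can be raised, e.g. through the
 * --alloc-size option that the OOM message further down refers to.
 */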
void sinit(void)
{
	bool ret;
	int i;

	for (i = 0; i < INITIAL_POOLS; i++) {
		ret = add_pool(&mp[nr_pools], smalloc_pool_size);
		if (!ret)
			break;
	}

	/*
	 * If we added at least one pool, we should be OK for most
	 * cases.
	 */
	assert(i);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * This will also remove the temporary file we used as a backing
	 * store, it was already unlinked
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_sem_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);
}

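/*
 * Redzone checking: when SMALLOC_REDZONE is defined, every allocation is
 * bracketed by two guard words. The leading guard lives in the block
 * header (prered) and the trailing guard is written into the last
 * int-aligned word of the allocated region (see postred_ptr()).
 * sfree_check_redzone() then verifies both patterns when a block is
 * freed, so overruns and underruns are caught at free time instead of
 * silently corrupting neighbouring blocks.
 */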
#ifdef SMALLOC_REDZONE
static void *postred_ptr(struct block_hdr *hdr)
{
	uintptr_t ptr;

	ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
	ptr = (uintptr_t) PTR_ALIGN(ptr, int_mask);

	return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		log_err("smalloc pre redzone destroyed!\n"
			" ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		log_err("smalloc post redzone destroyed!\n"
			" ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

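/*
 * Freeing walks back from the user pointer to the block header, then maps
 * the header's offset within the pool to a (bitmap word, bit index) pair.
 * For example, with the default sizes an offset of 2080 bytes lands in
 * word 2080 / 1024 = 2 at bit (2080 % 1024) / 32 = 1 (assuming a 4-byte
 * unsigned int). The blocks covered by the allocation are then cleared
 * under the pool lock and next_non_full is pulled back if needed.
 */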
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	fio_sem_down(pool->lock);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	fio_sem_up(pool->lock);
}

void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	if (pool) {
		sfree_pool(pool, ptr);
		return;
	}

	log_err("smalloc: ptr %p not from smalloc pool\n", ptr);
}

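/*
 * Pool allocation is a first-fit scan over the bitmap: starting from the
 * next_non_full hint, completely full words are skipped, and within a
 * word find_next_zero() proposes a start bit which blocks_free() then
 * checks for the whole run. On a miss the search resumes past the tested
 * range; on a hit the run is marked busy and translated back into a byte
 * offset into the pool mapping. All of this happens with the pool
 * semaphore held.
 */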
static void *__smalloc_pool(struct pool *pool, size_t size)
{
	size_t nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	fio_sem_down(pool->lock);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	fio_sem_up(pool->lock);
	return ret;
}

static void *smalloc_pool(struct pool *pool, size_t size)
{
	size_t alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		fill_redzone(hdr);

		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}

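/*
 * smalloc() remembers the last pool that satisfied a request and starts
 * the next search there, only wrapping around to the earlier pools once
 * the later ones are exhausted. Requests whose size does not fit in an
 * unsigned int are rejected up front rather than silently truncated.
 */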
void *smalloc(size_t size)
{
	unsigned int i, end_pool;

	if (size != (unsigned int) size)
		return NULL;

	i = last_pool;
	end_pool = nr_pools;

	do {
		for (; i < end_pool; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				return ptr;
			}
		}
		if (last_pool) {
			end_pool = last_pool;
			last_pool = i = 0;
			continue;
		}

		break;
	} while (1);

	log_err("smalloc: OOM. Consider using --alloc-size to increase the "
		"shared memory available.\n");
	return NULL;
}

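/*
 * scalloc() can stay a thin wrapper because smalloc_pool() already
 * memset()s every successful allocation to zero; smalloc_strdup() below
 * likewise relies on smalloc() for the storage and only copies the
 * string in on success.
 */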
void *scalloc(size_t nmemb, size_t size)
{
	return smalloc(nmemb * size);
}

char *smalloc_strdup(const char *str)
{
	char *ptr = NULL;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}