flow: fix bad overflowing math
[fio.git] / smalloc.c
/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
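
/*
 * Rough usage sketch of the public entry points defined below (the names
 * and values in the example calls are illustrative only): call sinit()
 * once before any allocation, allocate with smalloc()/scalloc()/
 * smalloc_strdup(), release with sfree(), and tear the pools down with
 * scleanup() at shutdown:
 *
 *	sinit();
 *	char *name = smalloc_strdup("job1");
 *	void *buf = scalloc(16, sizeof(int));
 *	sfree(buf);
 *	sfree(name);
 *	scleanup();
 */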
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <limits.h>
#include <fcntl.h>

#include "fio.h"
#include "mutex.h"
#include "arch/arch.h"
#include "os/os.h"
#include "smalloc.h"
#include "log.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */
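
/*
 * Block geometry: the bitmap tracks pool space in SMALLOC_BPB-byte blocks,
 * one bit per block. With a 32-bit unsigned int (the common case),
 * SMALLOC_BPI is 32 bits per bitmap word, so each bitmap word covers
 * SMALLOC_BPL = 32 * 32 = 1024 bytes of pool space.
 */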
#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)

#define INITIAL_SIZE	16*1024*1024	/* new pool size */
#define INITIAL_POOLS	8		/* maximum number of pools to setup */

#define MAX_POOLS	16

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
#ifdef SMALLOC_REDZONE
static const int int_mask = sizeof(int) - 1;
#endif

struct pool {
        struct fio_mutex *lock;			/* protects this pool */
        void *map;				/* map of blocks */
        unsigned int *bitmap;			/* blocks free/busy map */
        size_t free_blocks;		/* free blocks */
        size_t nr_blocks;		/* total bitmap words */
        size_t next_non_full;
        size_t mmap_size;
};

struct block_hdr {
        size_t size;
#ifdef SMALLOC_REDZONE
        unsigned int prered;
#endif
};
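
/*
 * Each allocation handed out by smalloc() is preceded by a struct block_hdr
 * recording the rounded-up allocation size. With SMALLOC_REDZONE enabled,
 * the header also carries a "prered" marker and an int-sized "postred"
 * marker is written at the aligned end of the block; both are verified
 * when the block is freed.
 */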

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;

static inline int ptr_valid(struct pool *pool, void *ptr)
{
        unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

        return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}
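
/*
 * Convert a byte count into a number of SMALLOC_BPB-sized blocks, rounding
 * up. For example, a 100 byte size maps to (100 + 31) / 32 = 4 blocks,
 * i.e. 128 bytes of pool space.
 */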
static inline size_t size_to_blocks(size_t size)
{
        return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}
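
/*
 * Walk nr_blocks bits of the pool bitmap, starting at bit 'idx' of word
 * 'pool_idx', and apply 'func' to each word with a mask covering the bits
 * that fall into that word. Stops and returns 0 as soon as 'func' does,
 * or if the range runs off the end of the bitmap; returns 1 if the whole
 * range was processed.
 */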
static int blocks_iter(struct pool *pool, unsigned int pool_idx,
                       unsigned int idx, size_t nr_blocks,
                       int (*func)(unsigned int *map, unsigned int mask))
{
        while (nr_blocks) {
                unsigned int this_blocks, mask;
                unsigned int *map;

                if (pool_idx >= pool->nr_blocks)
                        return 0;

                map = &pool->bitmap[pool_idx];

                this_blocks = nr_blocks;
                if (this_blocks + idx > SMALLOC_BPI) {
                        this_blocks = SMALLOC_BPI - idx;
                        idx = SMALLOC_BPI - this_blocks;
                }

                if (this_blocks == SMALLOC_BPI)
                        mask = -1U;
                else
                        mask = ((1U << this_blocks) - 1) << idx;

                if (!func(map, mask))
                        return 0;

                nr_blocks -= this_blocks;
                idx = 0;
                pool_idx++;
        }

        return 1;
}

static int mask_cmp(unsigned int *map, unsigned int mask)
{
        return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
        assert((*map & mask) == mask);
        *map &= ~mask;
        return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
        assert(!(*map & mask));
        *map |= mask;
        return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
                       unsigned int idx, size_t nr_blocks)
{
        return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
                       unsigned int idx, size_t nr_blocks)
{
        blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
                         unsigned int idx, size_t nr_blocks)
{
        blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}
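
/*
 * Return the index of the first zero bit in 'word' at or above bit 'start'.
 * ffz() (find first zero bit) is provided by the arch headers; the caller
 * must ensure the word is not all ones.
 */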
static int find_next_zero(int word, int start)
{
        assert(word != -1U);
        word >>= start;
        return ffz(word) + start;
}
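
/*
 * Set up one pool: a single anonymous mmap() holds the data blocks,
 * followed by the free/busy bitmap at the end of the mapping. The mapping
 * is MAP_SHARED (except on ESX, which uses MAP_PRIVATE) so that memory
 * handed out from it can be shared across processes, as the file header
 * notes.
 */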
static bool add_pool(struct pool *pool, unsigned int alloc_size)
{
        int bitmap_blocks;
        int mmap_flags;
        void *ptr;

        if (nr_pools == MAX_POOLS)
                return false;

#ifdef SMALLOC_REDZONE
        alloc_size += sizeof(unsigned int);
#endif
        alloc_size += sizeof(struct block_hdr);
        if (alloc_size < INITIAL_SIZE)
                alloc_size = INITIAL_SIZE;

        /* round up to nearest full number of blocks */
        alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
        bitmap_blocks = alloc_size / SMALLOC_BPL;
        alloc_size += bitmap_blocks * sizeof(unsigned int);
        pool->mmap_size = alloc_size;

        pool->nr_blocks = bitmap_blocks;
        pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

        mmap_flags = OS_MAP_ANON;
#ifdef CONFIG_ESX
        mmap_flags |= MAP_PRIVATE;
#else
        mmap_flags |= MAP_SHARED;
#endif
        ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0);

        if (ptr == MAP_FAILED)
                goto out_fail;

        pool->map = ptr;
        pool->bitmap = (unsigned int *)((char *) ptr + (pool->nr_blocks * SMALLOC_BPL));
        memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int));

        pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
        if (!pool->lock)
                goto out_fail;

        nr_pools++;
        return true;
out_fail:
        log_err("smalloc: failed adding pool\n");
        if (pool->map)
                munmap(pool->map, pool->mmap_size);
        return false;
}
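
/*
 * Create the initial set of pools. Called once at startup; as long as at
 * least one pool could be set up, allocations can be satisfied. Nothing in
 * this file grows the pool set afterwards - smalloc() simply fails once
 * all pools are full.
 */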
void sinit(void)
{
        bool ret;
        int i;

        for (i = 0; i < INITIAL_POOLS; i++) {
                ret = add_pool(&mp[nr_pools], smalloc_pool_size);
                if (!ret)
                        break;
        }

        /*
         * If we added at least one pool, we should be OK for most
         * cases.
         */
        assert(i);
}

static void cleanup_pool(struct pool *pool)
{
        /*
         * The pool is backed by an anonymous mapping (see add_pool()), so
         * unmapping it releases the memory; there is no backing file to
         * remove.
         */
        munmap(pool->map, pool->mmap_size);

        if (pool->lock)
                fio_mutex_remove(pool->lock);
}

void scleanup(void)
{
        unsigned int i;

        for (i = 0; i < nr_pools; i++)
                cleanup_pool(&mp[i]);
}
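
/*
 * Redzone helpers: when SMALLOC_REDZONE is defined, fill_redzone() stores
 * SMALLOC_PRE_RED in the block header and SMALLOC_POST_RED in the last
 * int-aligned word of the block, and sfree_check_redzone() verifies both
 * when the block is freed, catching writes just before or past the
 * allocation. Without SMALLOC_REDZONE these are no-ops.
 */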
#ifdef SMALLOC_REDZONE
static void *postred_ptr(struct block_hdr *hdr)
{
        uintptr_t ptr;

        ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
        ptr = (uintptr_t) PTR_ALIGN(ptr, int_mask);

        return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
        unsigned int *postred = postred_ptr(hdr);

        hdr->prered = SMALLOC_PRE_RED;
        *postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
        unsigned int *postred = postred_ptr(hdr);

        if (hdr->prered != SMALLOC_PRE_RED) {
                log_err("smalloc pre redzone destroyed!\n"
                        " ptr=%p, prered=%x, expected %x\n",
                        hdr, hdr->prered, SMALLOC_PRE_RED);
                assert(0);
        }
        if (*postred != SMALLOC_POST_RED) {
                log_err("smalloc post redzone destroyed!\n"
                        " ptr=%p, postred=%x, expected %x\n",
                        hdr, *postred, SMALLOC_POST_RED);
                assert(0);
        }
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif
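
/*
 * Free one block range back to its pool: locate the block header in front
 * of the user pointer, verify the redzones, then translate the byte offset
 * within the pool into a bitmap word index (i) and bit index (idx) and
 * clear the corresponding bits under the pool lock.
 */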
static void sfree_pool(struct pool *pool, void *ptr)
{
        struct block_hdr *hdr;
        unsigned int i, idx;
        unsigned long offset;

        if (!ptr)
                return;

        ptr -= sizeof(*hdr);
        hdr = ptr;

        assert(ptr_valid(pool, ptr));

        sfree_check_redzone(hdr);

        offset = ptr - pool->map;
        i = offset / SMALLOC_BPL;
        idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

        fio_mutex_down(pool->lock);
        clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
        if (i < pool->next_non_full)
                pool->next_non_full = i;
        pool->free_blocks += size_to_blocks(hdr->size);
        fio_mutex_up(pool->lock);
}
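
/*
 * Public free: find which pool the pointer belongs to by address range and
 * hand it to sfree_pool(). Freeing a pointer that did not come from
 * smalloc() is logged as an error.
 */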
void sfree(void *ptr)
{
        struct pool *pool = NULL;
        unsigned int i;

        if (!ptr)
                return;

        for (i = 0; i < nr_pools; i++) {
                if (ptr_valid(&mp[i], ptr)) {
                        pool = &mp[i];
                        break;
                }
        }

        if (pool) {
                sfree_pool(pool, ptr);
                return;
        }

        log_err("smalloc: ptr %p not from smalloc pool\n", ptr);
}
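
/*
 * Core allocation: first-fit scan of the pool bitmap under the pool lock.
 * The search starts at next_non_full to skip words known to be fully
 * allocated; within a word, find_next_zero() locates a candidate bit and
 * blocks_free() checks whether nr_blocks contiguous blocks are free from
 * there. On success the blocks are marked busy and the matching byte
 * offset into the pool is returned.
 */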
static void *__smalloc_pool(struct pool *pool, size_t size)
{
        size_t nr_blocks;
        unsigned int i;
        unsigned int offset;
        unsigned int last_idx;
        void *ret = NULL;

        fio_mutex_down(pool->lock);

        nr_blocks = size_to_blocks(size);
        if (nr_blocks > pool->free_blocks)
                goto fail;

        i = pool->next_non_full;
        last_idx = 0;
        offset = -1U;
        while (i < pool->nr_blocks) {
                unsigned int idx;

                if (pool->bitmap[i] == -1U) {
                        i++;
                        pool->next_non_full = i;
                        last_idx = 0;
                        continue;
                }

                idx = find_next_zero(pool->bitmap[i], last_idx);
                if (!blocks_free(pool, i, idx, nr_blocks)) {
                        idx += nr_blocks;
                        if (idx < SMALLOC_BPI)
                                last_idx = idx;
                        else {
                                last_idx = 0;
                                while (idx >= SMALLOC_BPI) {
                                        i++;
                                        idx -= SMALLOC_BPI;
                                }
                        }
                        continue;
                }
                set_blocks(pool, i, idx, nr_blocks);
                offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
                break;
        }

        if (i < pool->nr_blocks) {
                pool->free_blocks -= nr_blocks;
                ret = pool->map + offset;
        }
fail:
        fio_mutex_up(pool->lock);
        return ret;
}
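
/*
 * Per-pool allocation wrapper: reserves room for the block header (and the
 * post redzone when enabled), records the rounded size in the header, fills
 * the redzone and returns the zeroed user area just past the header.
 */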
static void *smalloc_pool(struct pool *pool, size_t size)
{
        size_t alloc_size = size + sizeof(struct block_hdr);
        void *ptr;

        /*
         * Round to int alignment, so that the postred pointer will
         * be naturally aligned as well.
         */
#ifdef SMALLOC_REDZONE
        alloc_size += sizeof(unsigned int);
        alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

        ptr = __smalloc_pool(pool, alloc_size);
        if (ptr) {
                struct block_hdr *hdr = ptr;

                hdr->size = alloc_size;
                fill_redzone(hdr);

                ptr += sizeof(*hdr);
                memset(ptr, 0, size);
        }

        return ptr;
}
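
/*
 * Public allocation entry point: try the pools round-robin, starting with
 * the pool that last succeeded (last_pool) and wrapping around to the
 * earlier pools before giving up. Sizes that do not fit in an unsigned int
 * are rejected up front.
 */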
void *smalloc(size_t size)
{
        unsigned int i, end_pool;

        if (size != (unsigned int) size)
                return NULL;

        i = last_pool;
        end_pool = nr_pools;

        do {
                for (; i < end_pool; i++) {
                        void *ptr = smalloc_pool(&mp[i], size);

                        if (ptr) {
                                last_pool = i;
                                return ptr;
                        }
                }
                if (last_pool) {
                        end_pool = last_pool;
                        last_pool = i = 0;
                        continue;
                }

                break;
        } while (1);

        log_err("smalloc: OOM. Consider using --alloc-size to increase the "
                "shared memory available.\n");
        return NULL;
}

void *scalloc(size_t nmemb, size_t size)
{
        /* guard against nmemb * size overflowing size_t */
        if (size && nmemb > SIZE_MAX / size)
                return NULL;

        return smalloc(nmemb * size);
}

char *smalloc_strdup(const char *str)
{
        char *ptr = NULL;

        ptr = smalloc(strlen(str) + 1);
        if (ptr)
                strcpy(ptr, str);
        return ptr;
}