[fio.git] / smalloc.c

/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
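/*
 * Minimal usage sketch (illustrative only, built from the entry points
 * defined below):
 *
 *	sinit();			set up the shared pools
 *	char *p = smalloc(128);		allocation visible to forked processes
 *	char *s = smalloc_strdup("x");	shared copy of a string
 *	sfree(s);
 *	sfree(p);
 *	scleanup();			tear the pools down at exit
 */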
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <limits.h>
#include <fcntl.h>

#include "mutex.h"
#include "arch/arch.h"
#include "os/os.h"
#include "smalloc.h"
#include "log.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

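/*
 * Allocation granularity: one bitmap bit covers SMALLOC_BPB (32) bytes of
 * pool space, and one bitmap word covers SMALLOC_BPI blocks (32 with a
 * 4-byte unsigned int), so each word accounts for SMALLOC_BPL = 32 * 32 =
 * 1024 bytes of pool space.
 */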
#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)

#define INITIAL_SIZE	16*1024*1024	/* new pool size */
#define MAX_POOLS	8		/* maximum number of pools to setup */

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
#ifdef SMALLOC_REDZONE
static const int int_mask = sizeof(int) - 1;
#endif

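/*
 * Each pool is one shared, anonymous mapping: the data blocks come first,
 * followed by the free/busy bitmap. next_non_full caches the lowest bitmap
 * word that may still have free bits, so allocation scans can skip ahead.
 */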
struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	size_t free_blocks;			/* free blocks */
	size_t nr_blocks;			/* total blocks */
	size_t next_non_full;
	size_t mmap_size;
};

struct block_hdr {
	size_t size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline size_t size_to_blocks(size_t size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}

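/*
 * Walk 'nr_blocks' bitmap bits starting at word 'pool_idx', bit 'idx',
 * applying 'func' to each word with a mask covering the bits touched in
 * that word. Returns 0 as soon as 'func' does, 1 if all bits were visited.
 */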
static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{
	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}

static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

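/*
 * blocks_free() checks that a run of blocks is entirely free, while
 * set_blocks()/clear_blocks() mark a run busy or free; all three are thin
 * wrappers around blocks_iter() with the matching mask callback.
 */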
static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

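/*
 * Find the first zero bit at or above 'start' in 'word'; e.g. a word of
 * 0x7 with start 0 yields bit 3. Callers guarantee the word is not
 * completely full, hence the assert below.
 */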
static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= start;
	return ffz(word) + start;
}

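/*
 * Size a new pool: account for the block header (and redzone, if enabled),
 * round up to whole bitmap words, tack the bitmap itself onto the end of
 * the mapping, and back everything with anonymous shared memory.
 */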
static int add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	int mmap_flags;
	void *ptr;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	pool->free_blocks = bitmap_blocks * SMALLOC_BPB;

	mmap_flags = OS_MAP_ANON;
#ifdef CONFIG_ESX
	mmap_flags |= MAP_PRIVATE;
#else
	mmap_flags |= MAP_SHARED;
#endif
	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0);

	if (ptr == MAP_FAILED)
		goto out_fail;

	pool->map = ptr;
	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);
	memset(pool->bitmap, 0, bitmap_blocks * sizeof(unsigned int));

	pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return 0;
out_fail:
	log_err("smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return 1;
}

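/*
 * Set up all MAX_POOLS pools up front; the assert below only requires that
 * the first pool was added successfully.
 */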
void sinit(void)
{
	int i, ret;

	for (i = 0; i < MAX_POOLS; i++) {
		ret = add_pool(&mp[i], smalloc_pool_size);
		if (ret)
			break;
	}

	/*
	 * If we added at least one pool, we should be OK for most
	 * cases.
	 */
	assert(i);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * The pool is backed by an anonymous mapping, so unmapping it is
	 * all that is needed to release the memory.
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);
}

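/*
 * With SMALLOC_REDZONE enabled, every allocation carries a known pattern
 * just before the user data (prered, in the block header) and another one
 * just past the end of it (postred). sfree() verifies both, so an overrun
 * or underrun trips an assert instead of silently corrupting neighbouring
 * blocks.
 */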
#ifdef SMALLOC_REDZONE
static void *postred_ptr(struct block_hdr *hdr)
{
	uintptr_t ptr;

	ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
	ptr = (ptr + int_mask) & ~int_mask;

	return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		log_err("smalloc pre redzone destroyed!\n"
			" ptr=%p, prered=%x, expected %x\n",
			hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		log_err("smalloc post redzone destroyed!\n"
			" ptr=%p, postred=%x, expected %x\n",
			hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

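/*
 * Convert the user pointer back into a bitmap position: the byte offset
 * into the pool divided by SMALLOC_BPL gives the bitmap word, and the
 * remainder divided by SMALLOC_BPB gives the bit within that word. The
 * run length is recovered from the size stored in the block header.
 */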
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	fio_mutex_down(pool->lock);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	fio_mutex_up(pool->lock);
}

void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	if (pool) {
		sfree_pool(pool, ptr);
		return;
	}

	log_err("smalloc: ptr %p not from smalloc pool\n", ptr);
}

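/*
 * First-fit scan over the pool bitmap, starting from the cached
 * next_non_full word. Fully used words are skipped; otherwise we look for
 * a gap of nr_blocks consecutive free bits, mark it busy, and translate
 * the word/bit position back into a byte offset in the pool.
 */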
static void *__smalloc_pool(struct pool *pool, size_t size)
{
	size_t nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	fio_mutex_down(pool->lock);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	fio_mutex_up(pool->lock);
	return ret;
}

static void *smalloc_pool(struct pool *pool, size_t size)
{
	size_t alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		fill_redzone(hdr);

		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}

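/*
 * Try the pools round-robin, starting with the one that satisfied the
 * previous request (last_pool) and wrapping around to the earlier pools
 * before giving up.
 */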
void *smalloc(size_t size)
{
	unsigned int i, end_pool;

	if (size != (unsigned int) size)
		return NULL;

	i = last_pool;
	end_pool = nr_pools;

	do {
		for (; i < end_pool; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				return ptr;
			}
		}
		if (last_pool) {
			end_pool = last_pool;
			last_pool = i = 0;
			continue;
		}

		break;
	} while (1);

	return NULL;
}

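/*
 * scalloc() relies on smalloc_pool() zero-filling every new allocation,
 * so no extra memset is needed here.
 */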
void *scalloc(size_t nmemb, size_t size)
{
	/* guard against nmemb * size overflowing size_t */
	if (size && nmemb > SIZE_MAX / size)
		return NULL;

	return smalloc(nmemb * size);
}

char *smalloc_strdup(const char *str)
{
	char *ptr = NULL;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}