Fio 1.41.5
[fio.git] / smalloc.c
/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
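
/*
 * Sketch of intended use (illustration only, not part of the original
 * file): sinit() sets up the first pool, and smalloc() then hands out
 * MAP_SHARED memory, so a store made by a forked child is visible to
 * the parent:
 *
 *	sinit();
 *	unsigned int *val = smalloc(sizeof(*val));
 *	if (fork() == 0) {
 *		*val = 42;		// child writes into the shared block
 *		_exit(0);
 *	}
 *	wait(NULL);			// needs <sys/wait.h>
 *	assert(*val == 42);		// parent sees the child's store
 *	sfree(val);
 *	scleanup();
 */
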
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <limits.h>

#include "mutex.h"
#include "arch/arch.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)

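/*
 * Worked numbers, assuming a 32-bit unsigned int: each bitmap word
 * tracks SMALLOC_BPI = 32 blocks of SMALLOC_BPB = 32 bytes, so one
 * bitmap word covers SMALLOC_BPL = 1024 bytes of pool space.
 */
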
#define INITIAL_SIZE	(8192 * 1024)	/* new pool size */
#define MAX_POOLS	128		/* maximum number of pools to setup */

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
const int int_mask = sizeof(int) - 1;

struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	unsigned int free_blocks;		/* free blocks */
	unsigned int nr_blocks;			/* total blocks */
	unsigned int next_non_full;		/* first bitmap word that may have room */
	int fd;					/* memory backing fd */
	unsigned int mmap_size;
};

struct block_hdr {
	unsigned int size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;
static struct fio_mutex *lock;

static inline void pool_lock(struct pool *pool)
{
	fio_mutex_down(pool->lock);
}

static inline void pool_unlock(struct pool *pool)
{
	fio_mutex_up(pool->lock);
}

static inline void global_read_lock(void)
{
	fio_mutex_down_read(lock);
}

static inline void global_read_unlock(void)
{
	fio_mutex_up_read(lock);
}

static inline void global_write_lock(void)
{
	fio_mutex_down_write(lock);
}

static inline void global_write_unlock(void)
{
	fio_mutex_up_write(lock);
}

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline unsigned int size_to_blocks(unsigned int size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}

static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{
	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI) {
			this_blocks = SMALLOC_BPI - idx;
			idx = SMALLOC_BPI - this_blocks;
		}

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}

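/*
 * Example of how a spanning request is walked (illustration only):
 * nr_blocks = 40 starting at pool_idx = 0, idx = 30 is visited as
 * mask 0xc0000000 on word 0 (bits 30-31), then the remaining 38
 * blocks continue at word 1, idx 0, with mask -1U covering bits 0-31
 * and a final mask 0x3f for bits 0-5 of word 2.
 */
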
static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	/*
	 * Shift by 'start', not 'start + 1': 'start' is the first bit
	 * that has not been examined yet, so it must be included in
	 * the search. Shifting it out would skip bit 0 of every word
	 * and miss valid free blocks.
	 */
	word >>= start;
	return ffz(word) + start;
}

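/*
 * Example, assuming ffz() (from arch/arch.h) returns the index of the
 * lowest zero bit: find_next_zero(0xff, 4) shifts the word down to
 * 0xf, ffz(0xf) is 4, so the result is bit 8 - the first free bit at
 * or above index 4.
 */
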
static int add_pool(struct pool *pool, unsigned int alloc_size)
{
	int fd, bitmap_blocks;
	char file[] = "/tmp/.fio_smalloc.XXXXXX";
	void *ptr;

	fd = mkstemp(file);
	if (fd < 0)
		goto out_close;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	/* each bitmap word tracks SMALLOC_BPI blocks */
	pool->free_blocks = bitmap_blocks * SMALLOC_BPI;

	if (ftruncate(fd, alloc_size) < 0)
		goto out_unlink;

	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED)
		goto out_unlink;

	memset(ptr, 0, alloc_size);
	pool->map = ptr;
	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

	pool->lock = fio_mutex_init(1);
	if (!pool->lock)
		goto out_unlink;

	/*
	 * Unlink the pool file now. It won't actually be deleted until
	 * the fd is closed, which happens both on cleanup and on
	 * unexpected exit. This way we don't leave temp files around
	 * in case of a crash.
	 */
	unlink(file);
	pool->fd = fd;

	nr_pools++;
	return 0;
out_unlink:
	fprintf(stderr, "smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	unlink(file);
out_close:
	if (fd >= 0)
		close(fd);
	return 1;
}

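/*
 * Worked example of the geometry above (illustration only): for the
 * default INITIAL_SIZE of 8 MiB, alloc_size is already a multiple of
 * SMALLOC_BPL (1024), so bitmap_blocks = 8388608 / 1024 = 8192 bitmap
 * words, tracking 8192 * 32 = 262144 blocks of 32 bytes. The bitmap
 * itself needs 8192 * 4 = 32 KiB, giving an mmap_size of 8 MiB + 32
 * KiB, with the bitmap placed right after the data area at
 * map + 8388608.
 */
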
void sinit(void)
{
	int ret;

	lock = fio_mutex_rw_init();
	ret = add_pool(&mp[0], INITIAL_SIZE);
	assert(!ret);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * This will also remove the temporary file we used as a backing
	 * store; it was already unlinked in add_pool().
	 */
	close(pool->fd);
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	if (lock)
		fio_mutex_remove(lock);
}

#ifdef SMALLOC_REDZONE
static void *postred_ptr(struct block_hdr *hdr)
{
	unsigned long ptr;

	ptr = (unsigned long) hdr + hdr->size - sizeof(unsigned int);
	ptr = (ptr + int_mask) & ~int_mask;

	return (void *) ptr;
}

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	if (hdr->prered != SMALLOC_PRE_RED) {
		fprintf(stderr, "smalloc pre redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred_ptr(hdr) != SMALLOC_POST_RED) {
		fprintf(stderr, "smalloc post redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, postred=%x, expected %x\n",
				hdr, *postred_ptr(hdr), SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

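/*
 * Resulting block layout with SMALLOC_REDZONE enabled (illustration
 * only): the pre redzone lives inside the header, the post redzone in
 * the last int of the allocation:
 *
 *	[ size | prered | user data ... pad | postred ]
 *	  ^ block_hdr                         ^ hdr + size - 4, int-aligned
 */
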
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}

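/*
 * Example of the offset math above (illustration only): a block that
 * starts 4160 bytes into the pool maps to bitmap word i = 4160 / 1024
 * = 4 and bit idx = (4160 % 1024) / 32 = 2, i.e. the third block
 * tracked by word 4.
 */
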
void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	global_read_lock();

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	global_read_unlock();

	assert(pool);
	sfree_pool(pool, ptr);
}

static void *__smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}

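/*
 * Example scan (illustration only): with bitmap[0] == -1U (full) and
 * bitmap[1] == 0x00000007, a 2-block request skips word 0, finds
 * idx = 3 in word 1 via find_next_zero(), verifies bits 3-4 are free
 * with blocks_free(), sets them, and returns offset 1 * 1024 + 3 * 32
 * = 1120 into the pool.
 */
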
static void *smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		fill_redzone(hdr);

		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}

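/*
 * Worked example of the sizing above (illustration only): a 13-byte
 * request with redzones gets alloc_size = 13 + 8 (header) + 4 (post
 * redzone) = 25, rounded up to 28 for int alignment.
 * size_to_blocks(28) is one 32-byte block; the caller's pointer is
 * hdr + 8 and the post redzone sits at hdr + 24.
 */
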
void *smalloc(unsigned int size)
{
	unsigned int i;

	global_write_lock();
	i = last_pool;

	do {
		for (; i < nr_pools; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				global_write_unlock();
				return ptr;
			}
		}
		if (last_pool) {
			/*
			 * Restart the scan from the first pool; without
			 * resetting 'i' here, the retry would never
			 * revisit the pools before last_pool.
			 */
			last_pool = i = 0;
			continue;
		}

		if (nr_pools + 1 > MAX_POOLS)
			break;
		else {
			i = nr_pools;
			if (add_pool(&mp[nr_pools], size))
				goto out;
		}
	} while (1);

out:
	global_write_unlock();
	return NULL;
}

char *smalloc_strdup(const char *str)
{
	char *ptr;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}