smalloc: fix map overflow
[fio.git] / smalloc.c
... / ...
CommitLineData
1/*
2 * simple memory allocator, backed by mmap() so that it hands out memory
3 * that can be shared across processes and threads
4 */
5#include <sys/mman.h>
6#include <stdio.h>
7#include <stdlib.h>
8#include <assert.h>
9#include <string.h>
10#include <unistd.h>
11#include <sys/types.h>
12#include <limits.h>
13
14#include "mutex.h"
15
#define MP_SAFE			/* define to make thread safe */
#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)	/* bits per bitmap word */
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)	/* bytes covered by one bitmap word */

#define INITIAL_SIZE	1024*1024	/* new pool size */
#define MAX_POOLS	4	/* maximum number of pools to setup */

/* magic values stamped before/after every allocation when SMALLOC_REDZONE
 * is defined; verified again at sfree() time to catch under/overruns */
#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

/* NOTE(review): not referenced elsewhere in this file — presumably read or
 * overridden by the allocator's users; confirm against callers */
unsigned int smalloc_pool_size = INITIAL_SIZE;
/*
 * One mmap()ed, file-backed memory pool. The data area starts at 'map';
 * the free/busy bitmap lives in the same mapping, directly after the
 * data area (see add_pool()).
 */
struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	unsigned int free_blocks;		/* free SMALLOC_BPB-byte blocks */
	unsigned int nr_blocks;			/* number of bitmap words, not
						 * individual blocks (add_pool()
						 * stores bitmap_blocks here) */
	unsigned int next_non_full;		/* scan hint: first bitmap word
						 * that may still have free bits */
	int fd;					/* memory backing fd */
	char file[PATH_MAX];			/* filename for fd */
	unsigned int mmap_size;			/* total mapped bytes (data + bitmap) */
};
42
/*
 * Header stored immediately in front of every allocation. 'size' is the
 * total block size, including this header and the redzones (see
 * smalloc_pool()).
 */
struct block_hdr {
	unsigned int size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;	/* SMALLOC_PRE_RED magic, checked on free */
#endif
};
49
static struct pool mp[MAX_POOLS];	/* the pools themselves (zeroed BSS) */
static unsigned int nr_pools;		/* how many of mp[] are live */
static unsigned int last_pool;		/* hint: pool that last satisfied an alloc */
static struct fio_mutex *lock;		/* read/write lock guarding the pool list */
54
55static inline void pool_lock(struct pool *pool)
56{
57 if (pool->lock)
58 fio_mutex_down(pool->lock);
59}
60
61static inline void pool_unlock(struct pool *pool)
62{
63 if (pool->lock)
64 fio_mutex_up(pool->lock);
65}
66
67static inline void global_read_lock(void)
68{
69 if (lock)
70 fio_mutex_down_read(lock);
71}
72
73static inline void global_read_unlock(void)
74{
75 if (lock)
76 fio_mutex_up_read(lock);
77}
78
79static inline void global_write_lock(void)
80{
81 if (lock)
82 fio_mutex_down_write(lock);
83}
84
85static inline void global_write_unlock(void)
86{
87 if (lock)
88 fio_mutex_up_write(lock);
89}
90
91static inline int ptr_valid(struct pool *pool, void *ptr)
92{
93 unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;
94
95 return (ptr >= pool->map) && (ptr < pool->map + pool_size);
96}
97
98static inline unsigned int size_to_blocks(unsigned int size)
99{
100 return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
101}
102
103static int blocks_iter(struct pool *pool, unsigned int pool_idx,
104 unsigned int idx, unsigned int nr_blocks,
105 int (*func)(unsigned int *map, unsigned int mask))
106{
107
108 while (nr_blocks) {
109 unsigned int this_blocks, mask;
110 unsigned int *map;
111
112 if (pool_idx >= pool->nr_blocks)
113 return 0;
114
115 map = &pool->bitmap[pool_idx];
116
117 this_blocks = nr_blocks;
118 if (this_blocks + idx > SMALLOC_BPI) {
119 this_blocks = SMALLOC_BPI - idx;
120 idx = SMALLOC_BPI - this_blocks;
121 }
122
123 if (this_blocks == SMALLOC_BPI)
124 mask = -1U;
125 else
126 mask = ((1U << this_blocks) - 1) << idx;
127
128 if (!func(map, mask))
129 return 0;
130
131 nr_blocks -= this_blocks;
132 idx = 0;
133 pool_idx++;
134 }
135
136 return 1;
137}
138
/* blocks_iter callback: non-zero iff all masked bits are clear (free) */
static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return (*map & mask) == 0;
}
143
/* blocks_iter callback: clear the masked bits; they must all be set */
static int mask_clear(unsigned int *map, unsigned int mask)
{
	unsigned int cur = *map;

	assert((cur & mask) == mask);
	*map = cur & ~mask;
	return 1;
}
150
/* blocks_iter callback: set the masked bits; they must all be clear */
static int mask_set(unsigned int *map, unsigned int mask)
{
	unsigned int cur = *map;

	assert((cur & mask) == 0);
	*map = cur | mask;
	return 1;
}
157
/* return 1 iff the nr_blocks blocks starting at word pool_idx / bit idx
 * are all currently free */
static int blocks_free(struct pool *pool, unsigned int pool_idx,
		 unsigned int idx, unsigned int nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}
163
/* mark nr_blocks blocks starting at word pool_idx / bit idx as busy;
 * asserts (via mask_set) that they were free */
static void set_blocks(struct pool *pool, unsigned int pool_idx,
		 unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}
169
/* mark nr_blocks blocks starting at word pool_idx / bit idx as free;
 * asserts (via mask_clear) that they were busy */
static void clear_blocks(struct pool *pool, unsigned int pool_idx,
		 unsigned int idx, unsigned int nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}
175
/*
 * Find the index (0..31) of the lowest set bit in 'word' via binary
 * search over halves/quarters/etc of the value.
 * Precondition: word != 0 (a zero input yields 31, which is meaningless;
 * the only caller, find_next_zero, always passes a non-zero value since
 * it asserts the bitmap word is not all-ones first).
 *
 * Fix: do the shifting on an unsigned copy — right-shifting a negative
 * signed int is implementation-defined in C. Results are unchanged,
 * since only low-order bits are ever tested.
 *
 * NOTE(review): the name __ffs is technically reserved for the
 * implementation; kept because callers use it.
 */
static inline int __ffs(int word)
{
	unsigned int w = (unsigned int) word;
	int r = 0;

	if (!(w & 0xffff)) {
		w >>= 16;
		r += 16;
	}
	if (!(w & 0xff)) {
		w >>= 8;
		r += 8;
	}
	if (!(w & 0xf)) {
		w >>= 4;
		r += 4;
	}
	if (!(w & 3)) {
		w >>= 2;
		r += 2;
	}
	if (!(w & 1)) {
		w >>= 1;
		r += 1;
	}

	return r;
}
203
/*
 * Return the index of the first zero bit in 'word' strictly after bit
 * 'start'. Asserts the word is not fully allocated first.
 *
 * NOTE(review): because of the '+ 1' the bit at position 'start' itself
 * is never examined — with start == 0 (the scan's initial value in
 * __smalloc_pool()) bit 0 of a word is skipped entirely; confirm this is
 * intentional and not a wasted block per word.
 * NOTE(review): 'word' is signed, so the right shift is
 * implementation-defined for negative values (bitmaps with bit 31 set);
 * the logic relies on the common arithmetic-shift behavior.
 */
static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= (start + 1);
	return __ffs(~word) + start + 1;
}
210
211static int add_pool(struct pool *pool, unsigned int alloc_size)
212{
213 void *ptr;
214 int fd, bitmap_blocks;
215
216 strcpy(pool->file, "/tmp/.fio_smalloc.XXXXXX");
217 fd = mkstemp(pool->file);
218 if (fd < 0)
219 goto out_close;
220
221#ifdef SMALLOC_REDZONE
222 alloc_size += sizeof(unsigned int);
223#endif
224 alloc_size += sizeof(struct block_hdr);
225 if (alloc_size < INITIAL_SIZE)
226 alloc_size = INITIAL_SIZE;
227
228 /* round up to nearest full number of blocks */
229 alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
230 bitmap_blocks = alloc_size / SMALLOC_BPL;
231 alloc_size += bitmap_blocks * sizeof(unsigned int);
232 pool->mmap_size = alloc_size;
233
234 pool->nr_blocks = bitmap_blocks;
235 pool->free_blocks = bitmap_blocks * SMALLOC_BPB;
236
237 if (ftruncate(fd, alloc_size) < 0)
238 goto out_unlink;
239
240 ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
241 if (ptr == MAP_FAILED)
242 goto out_unlink;
243
244 memset(ptr, 0, alloc_size);
245 pool->map = ptr;
246 pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);
247
248#ifdef MP_SAFE
249 pool->lock = fio_mutex_init(1);
250 if (!pool->lock)
251 goto out_unlink;
252#endif
253
254 pool->fd = fd;
255
256 global_write_lock();
257 nr_pools++;
258 global_write_unlock();
259 return 0;
260out_unlink:
261 fprintf(stderr, "smalloc: failed adding pool\n");
262 if (pool->map)
263 munmap(pool->map, pool->mmap_size);
264 unlink(pool->file);
265out_close:
266 if (fd >= 0)
267 close(fd);
268 return 1;
269}
270
/*
 * Allocator initialization: create the global read/write lock (when
 * thread safety is compiled in) and the first pool. Aborts via assert()
 * if the initial pool cannot be created.
 */
void sinit(void)
{
	int ret;

#ifdef MP_SAFE
	lock = fio_mutex_rw_init();
#endif
	ret = add_pool(&mp[0], INITIAL_SIZE);
	assert(!ret);
}
281
/*
 * Tear down one pool: remove the backing temp file, close its fd, drop
 * the mapping and finally the pool mutex (if one was created).
 */
static void cleanup_pool(struct pool *pool)
{
	unlink(pool->file);
	close(pool->fd);
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}
291
292void scleanup(void)
293{
294 unsigned int i;
295
296 for (i = 0; i < nr_pools; i++)
297 cleanup_pool(&mp[i]);
298
299 if (lock)
300 fio_mutex_remove(lock);
301}
302
/*
 * Stamp the redzone magics around a fresh allocation. The pre redzone is
 * the 'prered' field inside the header; the post redzone occupies the
 * last sizeof(unsigned int) bytes of the block (hdr->size is the total
 * block size including header and redzones). void-pointer arithmetic is
 * a GCC extension.
 */
static void fill_redzone(struct block_hdr *hdr)
{
#ifdef SMALLOC_REDZONE
	unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
#endif
}
312
/*
 * Verify both redzone magics at free time; a corrupted pre redzone means
 * a buffer underrun, a corrupted post redzone an overrun. Either aborts
 * the process via assert(0) after reporting the damage.
 */
static void sfree_check_redzone(struct block_hdr *hdr)
{
#ifdef SMALLOC_REDZONE
	unsigned int *postred = (void *) hdr + hdr->size - sizeof(unsigned int);

	if (hdr->prered != SMALLOC_PRE_RED) {
		fprintf(stderr, "smalloc pre redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		fprintf(stderr, "smalloc post redzone destroyed!\n");
		fprintf(stderr, "  ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
#endif
}
332
/*
 * Return an allocation to 'pool': step back to the block header, verify
 * the redzones, then clear the matching bitmap bits under the pool lock
 * and credit the freed blocks back.
 */
static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	/* the user pointer sits just past the header */
	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	/* translate the pool offset into bitmap word (i) and bit (idx) */
	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	/* this word may now be the earliest one with free bits */
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}
360
361void sfree(void *ptr)
362{
363 struct pool *pool = NULL;
364 unsigned int i;
365
366 if (!ptr)
367 return;
368
369 global_read_lock();
370
371 for (i = 0; i < nr_pools; i++) {
372 if (ptr_valid(&mp[i], ptr)) {
373 pool = &mp[i];
374 break;
375 }
376 }
377
378 global_read_unlock();
379
380 assert(pool);
381 sfree_pool(pool, ptr);
382}
383
/*
 * Core allocator: find a contiguous run of free blocks big enough for
 * 'size' bytes in the pool bitmap, mark it busy and return its address.
 * Returns NULL when no such run exists. Takes pool->lock itself.
 */
static void *__smalloc_pool(struct pool *pool, unsigned int size)
{
	unsigned int nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	/* cheap early-out: not even that many blocks free in total */
	if (nr_blocks > pool->free_blocks)
		goto fail;

	/* start at the first bitmap word known to (possibly) have free bits */
	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			/* word completely busy: advance the hint past it */
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			/* run not free: resume scanning past it, carrying
			 * the bit index over into following words */
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	/* i < nr_blocks only when the break above fired, so offset is valid */
	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}
438
/*
 * Allocate from one specific pool: grow the request by the block header
 * (and post redzone), stamp header and redzones, and hand back a zeroed
 * pointer to the user area just past the header. NULL when the pool is
 * too full.
 */
static void *smalloc_pool(struct pool *pool, unsigned int size)
{
	struct block_hdr *hdr;
	unsigned int alloc_size;
	void *ptr;

	alloc_size = size + sizeof(*hdr);
#ifdef SMALLOC_REDZONE
	/* pre redzone lives inside the header; only the post redzone
	 * needs extra room */
	alloc_size += sizeof(unsigned int);
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (!ptr)
		return NULL;

	hdr = ptr;
	hdr->size = alloc_size;	/* total size incl. header and redzones */
	ptr += sizeof(*hdr);

	fill_redzone(hdr);

	/* zero only the user area; the post redzone sits at ptr + size */
	memset(ptr, 0, size);
	return ptr;
}
463
/*
 * Public allocator: try pools starting from the last successful one,
 * wrap back to pool 0, and add a new pool (up to MAX_POOLS) when every
 * existing pool is full. Returns NULL when allocation is impossible.
 */
void *smalloc(unsigned int size)
{
	unsigned int i;

	global_read_lock();
	i = last_pool;

	do {
		for (; i < nr_pools; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				/* remember where we found room; this is
				 * only a hint, so unlocked update is fine */
				last_pool = i;
				global_read_unlock();
				return ptr;
			}
		}
		if (last_pool) {
			/* retry from the first pool before growing */
			last_pool = 0;
			continue;
		}

		if (nr_pools + 1 > MAX_POOLS)
			break;
		else {
			i = nr_pools;
			/* NOTE(review): the read lock is dropped so
			 * add_pool() can take the write lock; two threads
			 * racing here may each add a pool — confirm that
			 * is acceptable (it only wastes space) */
			global_read_unlock();
			if (add_pool(&mp[nr_pools], size))
				goto out;
			global_read_lock();
		}
	} while (1);

	global_read_unlock();
out:
	return NULL;
}
501
/*
 * strdup() into shared memory. Returns NULL when the allocator is out
 * of space.
 *
 * Fix: smalloc() can return NULL (all pools full, add_pool failure);
 * the original called strcpy() on that NULL, which is undefined
 * behavior. Only copy on success.
 */
char *smalloc_strdup(const char *str)
{
	char *ptr;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}