Commit | Line | Data |
---|---|---|
d24c33a4 JA |
1 | /* |
2 | * simple memory allocator, backed by mmap() so that it hands out memory | |
3 | * that can be shared across processes and threads | |
4 | */ | |
5 | #include <sys/mman.h> | |
6 | #include <stdio.h> | |
7 | #include <stdlib.h> | |
8 | #include <assert.h> | |
9 | #include <string.h> | |
10 | #include <unistd.h> | |
11 | #include <sys/types.h> | |
12 | #include <limits.h> | |
13 | ||
14 | #include "mutex.h" | |
15 | ||
#undef ENABLE_RESIZE /* define to enable pool resizing */
#define MP_SAFE /* define to make allocator thread safe */

#define INITIAL_SIZE 1048576 /* new pool size */
#define MAX_POOLS 32 /* maximum number of pools to setup */

/* default size of a newly created pool; may be overridden by users of the allocator */
unsigned int smalloc_pool_size = INITIAL_SIZE;

#ifdef ENABLE_RESIZE
/* cap on pool growth: a pool may grow to 8x the base pool size */
/* NOTE(review): expansion is unparenthesized; safe for current uses but fragile as a macro */
#define MAX_SIZE 8 * smalloc_pool_size
/* set when a resize attempt fails, so we never retry resizing */
static unsigned int resize_error;
#endif
28 | ||
/*
 * One mmap()-backed memory pool. The mapping is MAP_SHARED over a temp
 * file, so memory handed out is visible across processes and threads.
 */
struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	void *last;				/* next free block hint */
	unsigned int size;			/* size of pool */
	unsigned int room;			/* size left in pool */
	unsigned int largest_block;		/* largest block free */
	unsigned int free_since_compact;	/* sfree() since compact() */
	int fd;					/* memory backing fd */
	char file[PATH_MAX];			/* filename for fd */
};
40 | ||
static struct pool mp[MAX_POOLS];	/* the pools themselves */
static unsigned int nr_pools;		/* number of pools currently set up */
static unsigned int last_pool;		/* hint: pool that last satisfied an allocation */
static struct fio_mutex *lock;		/* global rw lock protecting the pool list */

/*
 * Header preceding every allocation inside a pool. The top bit of
 * ->size is the "free" flag (see hdr_free/hdr_size/hdr_mark_free).
 */
struct mem_hdr {
	unsigned int size;
};
49 | ||
50 | static inline void pool_lock(struct pool *pool) | |
51 | { | |
52 | if (pool->lock) | |
53 | fio_mutex_down(pool->lock); | |
54 | } | |
55 | ||
56 | static inline void pool_unlock(struct pool *pool) | |
57 | { | |
58 | if (pool->lock) | |
59 | fio_mutex_up(pool->lock); | |
60 | } | |
61 | ||
65864cf7 | 62 | static inline void global_read_lock(void) |
d24c33a4 JA |
63 | { |
64 | if (lock) | |
65864cf7 | 65 | fio_mutex_down_read(lock); |
d24c33a4 JA |
66 | } |
67 | ||
65864cf7 | 68 | static inline void global_read_unlock(void) |
d24c33a4 JA |
69 | { |
70 | if (lock) | |
65864cf7 JA |
71 | fio_mutex_up_read(lock); |
72 | } | |
73 | ||
74 | static inline void global_write_lock(void) | |
75 | { | |
76 | if (lock) | |
77 | fio_mutex_down_write(lock); | |
78 | } | |
79 | ||
80 | static inline void global_write_unlock(void) | |
81 | { | |
82 | if (lock) | |
83 | fio_mutex_up_write(lock); | |
d24c33a4 JA |
84 | } |
85 | ||
/*
 * The high bit of mem_hdr->size doubles as the "free" flag, leaving the
 * low 31 bits for the region size. hdr_free() tests the flag, hdr_size()
 * masks it off, hdr_mark_free() sets it.
 */
#define hdr_free(hdr) ((hdr)->size & 0x80000000)
#define hdr_size(hdr) ((hdr)->size & ~0x80000000)
#define hdr_mark_free(hdr) ((hdr)->size |= 0x80000000)
89 | ||
90 | static inline int ptr_valid(struct pool *pool, void *ptr) | |
91 | { | |
92 | return (ptr >= pool->map) && (ptr < pool->map + pool->size); | |
93 | } | |
94 | ||
/*
 * Return non-zero when a region of @size bytes starting at @hdr lies
 * entirely within the pool (first and last byte both in range).
 */
static inline int __hdr_valid(struct pool *pool, struct mem_hdr *hdr,
			      unsigned int size)
{
	void *first = hdr;
	void *last = (void *) hdr + size - 1;

	return ptr_valid(pool, first) && ptr_valid(pool, last);
}
100 | ||
/* Validate a header against the pool bounds using its own recorded size. */
static inline int hdr_valid(struct pool *pool, struct mem_hdr *hdr)
{
	unsigned int size = hdr_size(hdr);

	return __hdr_valid(pool, hdr, size);
}
105 | ||
/*
 * A region counts as free when its free bit is set, or when it is a
 * zero-sized stub (as left behind by merge()). Logically identical to
 * the long form: free || (!free && size == 0).
 */
static inline int region_free(struct mem_hdr *hdr)
{
	return hdr_free(hdr) || !hdr_size(hdr);
}
110 | ||
111 | static inline struct mem_hdr *__hdr_nxt(struct pool *pool, struct mem_hdr *hdr, | |
112 | unsigned int size) | |
113 | { | |
114 | struct mem_hdr *nxt = (void *) hdr + size + sizeof(*hdr); | |
115 | ||
116 | if (__hdr_valid(pool, nxt, size)) | |
117 | return nxt; | |
118 | ||
119 | return NULL; | |
120 | } | |
121 | ||
/* Step to the header after @hdr, using @hdr's own recorded size. */
static inline struct mem_hdr *hdr_nxt(struct pool *pool, struct mem_hdr *hdr)
{
	unsigned int size = hdr_size(hdr);

	return __hdr_nxt(pool, hdr, size);
}
126 | ||
127 | static void merge(struct pool *pool, struct mem_hdr *hdr, struct mem_hdr *nxt) | |
128 | { | |
129 | unsigned int hfree = hdr_free(hdr); | |
130 | unsigned int nfree = hdr_free(nxt); | |
131 | ||
132 | hdr->size = hdr_size(hdr) + hdr_size(nxt) + sizeof(*nxt); | |
133 | nxt->size = 0; | |
134 | ||
135 | if (hfree) | |
136 | hdr_mark_free(hdr); | |
137 | if (nfree) | |
138 | hdr_mark_free(nxt); | |
139 | ||
140 | if (pool->last == nxt) | |
141 | pool->last = hdr; | |
142 | } | |
143 | ||
/*
 * Merge two adjacent regions when both exist and both are free.
 * Returns 1 when a merge happened, 0 otherwise.
 */
static int combine(struct pool *pool, struct mem_hdr *prv, struct mem_hdr *hdr)
{
	int mergeable = prv && hdr_free(prv) && hdr_free(hdr);

	if (!mergeable)
		return 0;

	merge(pool, prv, hdr);
	return 1;
}
153 | ||
154 | static int compact_pool(struct pool *pool) | |
155 | { | |
156 | struct mem_hdr *hdr = pool->map, *nxt; | |
157 | unsigned int compacted = 0; | |
158 | ||
159 | if (pool->free_since_compact < 50) | |
160 | return 1; | |
161 | ||
162 | while (hdr) { | |
163 | nxt = hdr_nxt(pool, hdr); | |
164 | if (!nxt) | |
165 | break; | |
166 | if (hdr_free(nxt) && hdr_free(hdr)) { | |
167 | merge(pool, hdr, nxt); | |
168 | compacted++; | |
169 | continue; | |
170 | } | |
171 | hdr = hdr_nxt(pool, hdr); | |
172 | } | |
173 | ||
174 | pool->free_since_compact = 0; | |
175 | return !!compacted; | |
176 | } | |
177 | ||
/*
 * Double the size of a pool's backing file and mapping, attaching the
 * new space to the last region. Returns 0 on success, non-zero when
 * resizing is disabled, capped at MAX_SIZE, or has previously failed.
 *
 * Fixes to the (previously dead, never-compiling) ENABLE_RESIZE branch:
 * - "hdr = pool" assigned the wrong pointer; must start at pool->map
 * - hdr_nxt() was called without its pool argument
 * - pool_size/pool_room were undefined; meant pool->size/pool->room
 * - the fail: path fell off the end of a non-void function (UB)
 */
static int resize_pool(struct pool *pool)
{
#ifdef ENABLE_RESIZE
	unsigned int new_size = pool->size << 1;
	struct mem_hdr *hdr, *last_hdr;
	void *ptr;

	if (new_size >= MAX_SIZE || resize_error)
		return 1;

	if (ftruncate(pool->fd, new_size) < 0)
		goto fail;

	/* flags == 0: the mapping must grow in place */
	ptr = mremap(pool->map, pool->size, new_size, 0);
	if (ptr == MAP_FAILED)
		goto fail;

	pool->map = ptr;

	/* walk to the final header so the new space can be attached */
	hdr = pool->map;
	do {
		last_hdr = hdr;
	} while ((hdr = hdr_nxt(pool, hdr)) != NULL);

	if (hdr_free(last_hdr)) {
		/* grow the trailing free region in place */
		last_hdr->size = hdr_size(last_hdr) + new_size - pool->size;
		hdr_mark_free(last_hdr);
	} else {
		struct mem_hdr *nxt;

		/* append a fresh free region after the last in-use block */
		nxt = (void *) last_hdr + hdr_size(last_hdr) + sizeof(*hdr);
		nxt->size = new_size - pool->size - sizeof(*hdr);
		hdr_mark_free(nxt);
	}

	pool->room += new_size - pool->size;
	pool->size = new_size;
	return 0;
fail:
	perror("resize");
	resize_error = 1;
	return 1;
#else
	return 1;
#endif
}
222 | ||
adf57099 | 223 | static int add_pool(struct pool *pool, unsigned int alloc_size) |
d24c33a4 JA |
224 | { |
225 | struct mem_hdr *hdr; | |
226 | void *ptr; | |
227 | int fd; | |
228 | ||
229 | strcpy(pool->file, "/tmp/.fio_smalloc.XXXXXX"); | |
230 | fd = mkstemp(pool->file); | |
231 | if (fd < 0) | |
232 | goto out_close; | |
233 | ||
c08e194d | 234 | alloc_size += sizeof(*hdr); |
adf57099 JA |
235 | if (alloc_size > smalloc_pool_size) |
236 | pool->size = alloc_size; | |
237 | else | |
238 | pool->size = smalloc_pool_size; | |
239 | ||
d24c33a4 JA |
240 | if (ftruncate(fd, pool->size) < 0) |
241 | goto out_unlink; | |
242 | ||
243 | ptr = mmap(NULL, pool->size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); | |
244 | if (ptr == MAP_FAILED) | |
245 | goto out_unlink; | |
246 | ||
247 | memset(ptr, 0, pool->size); | |
248 | pool->map = pool->last = ptr; | |
249 | ||
250 | #ifdef MP_SAFE | |
251 | pool->lock = fio_mutex_init(1); | |
252 | if (!pool->lock) | |
253 | goto out_unlink; | |
254 | #endif | |
255 | ||
256 | pool->fd = fd; | |
257 | ||
258 | hdr = pool->map; | |
259 | pool->room = hdr->size = pool->size - sizeof(*hdr); | |
260 | pool->largest_block = pool->room; | |
261 | hdr_mark_free(hdr); | |
65864cf7 | 262 | global_write_lock(); |
d24c33a4 | 263 | nr_pools++; |
65864cf7 | 264 | global_write_unlock(); |
d24c33a4 JA |
265 | return 0; |
266 | out_unlink: | |
267 | if (pool->map) | |
268 | munmap(pool->map, pool->size); | |
269 | unlink(pool->file); | |
270 | out_close: | |
271 | if (fd >= 0) | |
272 | close(fd); | |
273 | return 1; | |
274 | } | |
275 | ||
276 | void sinit(void) | |
277 | { | |
4d4e80f2 | 278 | int ret; |
d24c33a4 JA |
279 | |
280 | #ifdef MP_SAFE | |
9c5b5290 | 281 | lock = fio_mutex_rw_init(); |
d24c33a4 | 282 | #endif |
adf57099 | 283 | ret = add_pool(&mp[0], INITIAL_SIZE); |
d24c33a4 JA |
284 | assert(!ret); |
285 | } | |
286 | ||
287 | static void cleanup_pool(struct pool *pool) | |
288 | { | |
289 | unlink(pool->file); | |
290 | close(pool->fd); | |
291 | munmap(pool->map, pool->size); | |
292 | ||
293 | if (pool->lock) | |
294 | fio_mutex_remove(pool->lock); | |
295 | } | |
296 | ||
297 | void scleanup(void) | |
298 | { | |
299 | unsigned int i; | |
300 | ||
301 | for (i = 0; i < nr_pools; i++) | |
302 | cleanup_pool(&mp[i]); | |
303 | ||
304 | if (lock) | |
305 | fio_mutex_remove(lock); | |
306 | } | |
307 | ||
308 | static void sfree_pool(struct pool *pool, void *ptr) | |
309 | { | |
310 | struct mem_hdr *hdr, *nxt; | |
311 | ||
312 | if (!ptr) | |
313 | return; | |
314 | ||
315 | assert(ptr_valid(pool, ptr)); | |
316 | ||
317 | pool_lock(pool); | |
318 | hdr = ptr - sizeof(*hdr); | |
319 | assert(!hdr_free(hdr)); | |
320 | hdr_mark_free(hdr); | |
321 | pool->room -= hdr_size(hdr); | |
322 | ||
323 | nxt = hdr_nxt(pool, hdr); | |
324 | if (nxt && hdr_free(nxt)) | |
325 | merge(pool, hdr, nxt); | |
326 | ||
327 | if (hdr_size(hdr) > pool->largest_block) | |
328 | pool->largest_block = hdr_size(hdr); | |
329 | ||
330 | pool->free_since_compact++; | |
331 | pool_unlock(pool); | |
332 | } | |
333 | ||
334 | void sfree(void *ptr) | |
335 | { | |
336 | struct pool *pool = NULL; | |
337 | unsigned int i; | |
338 | ||
8e5732e5 JA |
339 | if (!ptr) |
340 | return; | |
341 | ||
65864cf7 | 342 | global_read_lock(); |
d24c33a4 JA |
343 | |
344 | for (i = 0; i < nr_pools; i++) { | |
345 | if (ptr_valid(&mp[i], ptr)) { | |
346 | pool = &mp[i]; | |
347 | break; | |
348 | } | |
349 | } | |
350 | ||
65864cf7 | 351 | global_read_unlock(); |
d24c33a4 JA |
352 | |
353 | assert(pool); | |
354 | sfree_pool(pool, ptr); | |
355 | } | |
356 | ||
/*
 * Allocate @size bytes from @pool. Scans from the pool's "last" hint,
 * merging adjacent free regions as it goes, splits the found region if
 * enough slack remains, and zeroes the returned memory. On failure it
 * first compacts the pool and retries, then tries to resize it, before
 * giving up and returning NULL.
 */
static void *smalloc_pool(struct pool *pool, unsigned int size)
{
	struct mem_hdr *hdr, *prv;
	int did_restart = 0;	/* counts fail-path recovery attempts */
	void *ret;

	if (!size)
		return NULL;

	pool_lock(pool);
	/* quick rejects: not enough total room, or no block large enough */
	if (size > pool->room + sizeof(*hdr))
		goto fail;
	if ((size > pool->largest_block) && pool->largest_block)
		goto fail;
restart:
	hdr = pool->last;
	prv = NULL;
	do {
		/* merge prv+hdr when both free, then continue from prv */
		if (combine(pool, prv, hdr))
			hdr = prv;

		if (hdr_free(hdr) && hdr_size(hdr) >= size)
			break;

		prv = hdr;
	} while ((hdr = hdr_nxt(pool, hdr)) != NULL);

	if (!hdr)
		goto fail;

	/*
	 * more room, adjust next header if any
	 */
	if (hdr_size(hdr) - size >= 2 * sizeof(*hdr)) {
		struct mem_hdr *nxt = __hdr_nxt(pool, hdr, size);

		if (nxt) {
			/* split: the remainder becomes a new free region */
			nxt->size = hdr_size(hdr) - size - sizeof(*hdr);
			if (hdr_size(hdr) == pool->largest_block)
				pool->largest_block = hdr_size(nxt);
			hdr_mark_free(nxt);
		} else
			size = hdr_size(hdr);	/* no room for a split header */
	} else
		size = hdr_size(hdr);	/* slack too small, take it all */

	/* consumed the largest block whole: hint is now unknown (0) */
	if (size == hdr_size(hdr) && size == pool->largest_block)
		pool->largest_block = 0;

	/*
	 * also clears free bit
	 */
	hdr->size = size;
	pool->last = hdr_nxt(pool, hdr);
	if (!pool->last)
		pool->last = pool->map;
	pool->room -= size;
	pool_unlock(pool);

	ret = (void *) hdr + sizeof(*hdr);
	memset(ret, 0, size);
	return ret;
fail:
	/*
	 * if we fail to allocate, first compact the entries that we missed.
	 * if that also fails, increase the size of the pool
	 */
	++did_restart;
	if (did_restart <= 1) {
		if (!compact_pool(pool)) {
			pool->last = pool->map;
			goto restart;
		}
	}
	++did_restart;
	if (did_restart <= 2) {
		if (!resize_pool(pool)) {
			pool->last = pool->map;
			goto restart;
		}
	}
	pool_unlock(pool);
	return NULL;
}
441 | ||
442 | void *smalloc(unsigned int size) | |
443 | { | |
444 | unsigned int i; | |
445 | ||
65864cf7 | 446 | global_read_lock(); |
d24c33a4 JA |
447 | i = last_pool; |
448 | ||
449 | do { | |
450 | for (; i < nr_pools; i++) { | |
451 | void *ptr = smalloc_pool(&mp[i], size); | |
452 | ||
453 | if (ptr) { | |
454 | last_pool = i; | |
65864cf7 | 455 | global_read_unlock(); |
d24c33a4 JA |
456 | return ptr; |
457 | } | |
458 | } | |
459 | if (last_pool) { | |
460 | last_pool = 0; | |
461 | continue; | |
462 | } | |
463 | ||
464 | if (nr_pools + 1 >= MAX_POOLS) | |
465 | break; | |
466 | else { | |
467 | i = nr_pools; | |
65864cf7 | 468 | global_read_unlock(); |
adf57099 | 469 | if (add_pool(&mp[nr_pools], size)) |
65864cf7 JA |
470 | goto out; |
471 | global_read_lock(); | |
d24c33a4 JA |
472 | } |
473 | } while (1); | |
474 | ||
65864cf7 JA |
475 | global_read_unlock(); |
476 | out: | |
d24c33a4 JA |
477 | return NULL; |
478 | } | |
479 | ||
/*
 * strdup() into shared memory. Returns NULL when the allocation fails;
 * the old code passed an unchecked smalloc() result straight to
 * strcpy(), which is undefined behavior on NULL.
 */
char *smalloc_strdup(const char *str)
{
	char *ptr;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}