/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */
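
/*
 * Illustrative use through the zpool API (a sketch only, not part of this
 * file's code; the pool name "test" and the variables src and len are
 * placeholders, and the calls assume the zpool interface declared in
 * <linux/zpool.h> in the same tree):
 *
 *        struct zpool *zpool = zpool_create_pool("z3fold", "test", GFP_KERNEL,
 *                                                NULL);
 *        unsigned long handle;
 *
 *        if (!zpool)
 *                return -ENOMEM;
 *        if (zpool_malloc(zpool, len, GFP_KERNEL, &handle) == 0) {
 *                void *dst = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
 *
 *                memcpy(dst, src, len);
 *                zpool_unmap_handle(zpool, handle);
 *                zpool_free(zpool, handle);
 *        }
 *        zpool_destroy_pool(zpool);
 */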

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
        int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
        HEADLESS = 0,
        FIRST,
        MIDDLE,
        LAST,
        BUDDIES_MAX
};

/*
 * struct z3fold_header - z3fold page metadata occupying the first chunk of
 *        each z3fold page, except for HEADLESS pages
 * @buddy: links the z3fold page into the relevant list in the pool
 * @first_chunks: the size of the first buddy in chunks, 0 if free
 * @middle_chunks: the size of the middle buddy in chunks, 0 if free
 * @last_chunks: the size of the last buddy in chunks, 0 if free
 * @start_middle: index of the chunk where the middle buddy starts
 * @first_num: the starting number (for the first handle)
 */
struct z3fold_header {
        struct list_head buddy;
        unsigned short first_chunks;
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;
        unsigned short first_num:2;
};

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS works out to 63 (or 62 when CONFIG_DEBUG_SPINLOCK=y), which is the
 * maximum number of free chunks in a z3fold page; accordingly, there are 63
 * (or 62) freelists per pool.
 */
#define NCHUNKS_ORDER        6

#define CHUNK_SHIFT        (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE        (1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS        (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS        (PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS                ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK        (0x3)

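/*
 * Worked example of the values above (illustrative only; it assumes 4K pages,
 * i.e. PAGE_SHIFT == 12, and a configuration where the z3fold header fits in
 * a single chunk):
 *        CHUNK_SHIFT       = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *        ZHDR_SIZE_ALIGNED = 64 (sizeof(struct z3fold_header) rounded up)
 *        ZHDR_CHUNKS       = 1
 *        TOTAL_CHUNKS      = 4096 / 64 = 64
 *        NCHUNKS           = (4096 - 64) / 64 = 63
 */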

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @lock: protects all pool fields and first|last_chunk fields of any
 *        z3fold page in the pool
 * @unbuddied: array of lists tracking z3fold pages that contain at most two
 *        buddies; the list a z3fold page is added to depends on the size of
 *        its free region.
 * @buddied: list tracking the z3fold pages that contain 3 buddies;
 *        these z3fold pages are full
 * @lru: list tracking the z3fold pages in LRU order by most recently
 *        added buddy.
 * @pages_nr: number of z3fold pages in the pool.
 * @ops: pointer to a structure of user defined operations specified at
 *        pool creation time.
 * @zpool: zpool driver this pool is attached to, if created via zpool
 * @zpool_ops: zpool operations supplying the eviction callback
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
        spinlock_t lock;
        struct list_head unbuddied[NCHUNKS];
        struct list_head buddied;
        struct list_head lru;
        atomic64_t pages_nr;
        const struct z3fold_ops *ops;
        struct zpool *zpool;
        const struct zpool_ops *zpool_ops;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
        UNDER_RECLAIM = 0,
        PAGE_HEADLESS,
        MIDDLE_CHUNK_MAPPED,
};


/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
        for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page)
{
        struct z3fold_header *zhdr = page_address(page);

        INIT_LIST_HEAD(&page->lru);
        clear_bit(UNDER_RECLAIM, &page->private);
        clear_bit(PAGE_HEADLESS, &page->private);
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);

        zhdr->first_chunks = 0;
        zhdr->middle_chunks = 0;
        zhdr->last_chunks = 0;
        zhdr->first_num = 0;
        zhdr->start_middle = 0;
        INIT_LIST_HEAD(&zhdr->buddy);
        return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct z3fold_header *zhdr)
{
        __free_page(virt_to_page(zhdr));
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
        unsigned long handle;

        handle = (unsigned long)zhdr;
        if (bud != HEADLESS)
                handle += (bud + zhdr->first_num) & BUDDY_MASK;
        return handle;
}

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
        return (struct z3fold_header *)(handle & PAGE_MASK);
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
        struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
        return (handle - zhdr->first_num) & BUDDY_MASK;
}
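
/*
 * Illustrative round trip (not part of the original code): a z3fold page is
 * page-aligned, so the two low bits of zhdr are zero. With first_num == 1,
 * encode_handle(zhdr, LAST) returns zhdr + ((3 + 1) & 0x3) == zhdr + 0, and
 * handle_to_buddy() then computes (zhdr - 1) & 0x3 == 3 == LAST, so the
 * wrap-around mentioned above is indeed harmless.
 */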

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
        int nfree;
        /*
         * If there is a middle object, pick up the bigger free space
         * either before or after it. Otherwise just subtract the number
         * of chunks occupied by the first and the last objects.
         */
        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - ZHDR_CHUNKS;
                int nfree_after = zhdr->last_chunks ?
                        0 : TOTAL_CHUNKS -
                                (zhdr->start_middle + zhdr->middle_chunks);
                nfree = max(nfree_before, nfree_after);
        } else
                nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        return nfree;
}
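
/*
 * Example (illustrative, with TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1): a page
 * holding only a middle buddy with start_middle == 10 and middle_chunks == 20
 * has nfree_before == 10 - 1 == 9 and nfree_after == 64 - (10 + 20) == 34,
 * so num_free_chunks() returns 34.
 */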

/*****************
 * API Functions
 *****************/
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @gfp: gfp flags when allocating the z3fold pool structure
 * @ops: user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
                const const struct z3fold_ops *ops);
static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
                const struct z3fold_ops *ops)
{
        struct z3fold_pool *pool;
        int i;

        pool = kzalloc(sizeof(struct z3fold_pool), gfp);
        if (!pool)
                return NULL;
        spin_lock_init(&pool->lock);
        for_each_unbuddied_list(i, 0)
                INIT_LIST_HEAD(&pool->unbuddied[i]);
        INIT_LIST_HEAD(&pool->buddied);
        INIT_LIST_HEAD(&pool->lru);
        atomic64_set(&pool->pages_nr, 0);
        pool->ops = ops;
        return pool;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool: the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
        kfree(pool);
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
                                   unsigned short dst_chunk)
{
        void *beg = zhdr;
        return memmove(beg + (dst_chunk << CHUNK_SHIFT),
                       beg + (zhdr->start_middle << CHUNK_SHIFT),
                       zhdr->middle_chunks << CHUNK_SHIFT);
}

/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
                return 0; /* can't move middle chunk, it's used */

        if (zhdr->middle_chunks == 0)
                return 0; /* nothing to compact */

        if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
                /* move to the beginning */
                mchunk_memmove(zhdr, ZHDR_CHUNKS);
                zhdr->first_chunks = zhdr->middle_chunks;
                zhdr->middle_chunks = 0;
                zhdr->start_middle = 0;
                /*
                 * Bumping first_num makes the existing handle of the moved
                 * buddy decode to FIRST instead of MIDDLE.
                 */
                zhdr->first_num++;
        }
        return 1;
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool: z3fold pool from which to allocate
 * @size: size in bytes of the desired allocation
 * @gfp: gfp flags used if the pool needs to grow
 * @handle: handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        int chunks = 0, i, freechunks;
        struct z3fold_header *zhdr = NULL;
        enum buddy bud;
        struct page *page;

        if (!size || (gfp & __GFP_HIGHMEM))
                return -EINVAL;

        if (size > PAGE_SIZE)
                return -ENOSPC;

        if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
                bud = HEADLESS;
        else {
                chunks = size_to_chunks(size);
                spin_lock(&pool->lock);

                /* First, try to find an unbuddied z3fold page. */
                zhdr = NULL;
                for_each_unbuddied_list(i, chunks) {
                        if (!list_empty(&pool->unbuddied[i])) {
                                zhdr = list_first_entry(&pool->unbuddied[i],
                                                struct z3fold_header, buddy);
                                page = virt_to_page(zhdr);
                                if (zhdr->first_chunks == 0) {
                                        if (zhdr->middle_chunks != 0 &&
                                            chunks >= zhdr->start_middle)
                                                bud = LAST;
                                        else
                                                bud = FIRST;
                                } else if (zhdr->last_chunks == 0)
                                        bud = LAST;
                                else if (zhdr->middle_chunks == 0)
                                        bud = MIDDLE;
                                else {
                                        pr_err("No free chunks in unbuddied\n");
                                        WARN_ON(1);
                                        continue;
                                }
                                list_del(&zhdr->buddy);
                                goto found;
                        }
                }
                bud = FIRST;
                spin_unlock(&pool->lock);
        }

        /* Couldn't find unbuddied z3fold page, create new one */
        page = alloc_page(gfp);
        if (!page)
                return -ENOMEM;
        spin_lock(&pool->lock);
        atomic64_inc(&pool->pages_nr);
        zhdr = init_z3fold_page(page);

        if (bud == HEADLESS) {
                set_bit(PAGE_HEADLESS, &page->private);
                goto headless;
        }

found:
        if (bud == FIRST)
                zhdr->first_chunks = chunks;
        else if (bud == LAST)
                zhdr->last_chunks = chunks;
        else {
                zhdr->middle_chunks = chunks;
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
        }

        if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
            zhdr->middle_chunks == 0) {
                /* Add to unbuddied list */
                freechunks = num_free_chunks(zhdr);
                list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
        } else {
                /* Add to buddied list */
                list_add(&zhdr->buddy, &pool->buddied);
        }

headless:
        /* Add/move z3fold page to beginning of LRU */
        if (!list_empty(&page->lru))
                list_del(&page->lru);

        list_add(&page->lru, &pool->lru);

        *handle = encode_handle(zhdr, bud);
        spin_unlock(&pool->lock);

        return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool: pool in which the allocation resided
 * @handle: handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the UNDER_RECLAIM flag being set, this function
 * only sets the corresponding *_chunks field to 0. The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        int freechunks;
        struct page *page;
        enum buddy bud;

        spin_lock(&pool->lock);
        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* HEADLESS page stored */
                bud = HEADLESS;
        } else {
                bud = handle_to_buddy(handle);

                switch (bud) {
                case FIRST:
                        zhdr->first_chunks = 0;
                        break;
                case MIDDLE:
                        zhdr->middle_chunks = 0;
                        zhdr->start_middle = 0;
                        break;
                case LAST:
                        zhdr->last_chunks = 0;
                        break;
                default:
                        pr_err("%s: unknown bud %d\n", __func__, bud);
                        WARN_ON(1);
                        spin_unlock(&pool->lock);
                        return;
                }
        }

        if (test_bit(UNDER_RECLAIM, &page->private)) {
                /* z3fold page is under reclaim, reclaim will free */
                spin_unlock(&pool->lock);
                return;
        }

        /* Remove from existing buddy list */
        if (bud != HEADLESS)
                list_del(&zhdr->buddy);

        if (bud == HEADLESS ||
            (zhdr->first_chunks == 0 && zhdr->middle_chunks == 0 &&
             zhdr->last_chunks == 0)) {
                /* z3fold page is empty, free */
                list_del(&page->lru);
                clear_bit(PAGE_HEADLESS, &page->private);
                free_z3fold_page(zhdr);
                atomic64_dec(&pool->pages_nr);
        } else {
                z3fold_compact_page(zhdr);
                /* Add to the unbuddied list */
                freechunks = num_free_chunks(zhdr);
                list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
        }

        spin_unlock(&pool->lock);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool: pool from which a page will attempt to be evicted
 * @retries: number of pages on the LRU list for which eviction will
 *        be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the UNDER_RECLAIM flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
        int i, ret = 0, freechunks;
        struct z3fold_header *zhdr;
        struct page *page;
        unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

        spin_lock(&pool->lock);
        if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
            retries == 0) {
                spin_unlock(&pool->lock);
                return -EINVAL;
        }
        for (i = 0; i < retries; i++) {
                page = list_last_entry(&pool->lru, struct page, lru);
                list_del(&page->lru);

                /* Protect z3fold page against free */
                set_bit(UNDER_RECLAIM, &page->private);
                zhdr = page_address(page);
                if (!test_bit(PAGE_HEADLESS, &page->private)) {
                        list_del(&zhdr->buddy);
                        /*
                         * We need to encode the handles before unlocking,
                         * since we can race with free that will set
                         * (first|last)_chunks to 0
                         */
                        first_handle = 0;
                        last_handle = 0;
                        middle_handle = 0;
                        if (zhdr->first_chunks)
                                first_handle = encode_handle(zhdr, FIRST);
                        if (zhdr->middle_chunks)
                                middle_handle = encode_handle(zhdr, MIDDLE);
                        if (zhdr->last_chunks)
                                last_handle = encode_handle(zhdr, LAST);
                } else {
                        first_handle = encode_handle(zhdr, HEADLESS);
                        last_handle = middle_handle = 0;
                }

                spin_unlock(&pool->lock);

                /* Issue the eviction callback(s) */
                if (middle_handle) {
                        ret = pool->ops->evict(pool, middle_handle);
                        if (ret)
                                goto next;
                }
                if (first_handle) {
                        ret = pool->ops->evict(pool, first_handle);
                        if (ret)
                                goto next;
                }
                if (last_handle) {
                        ret = pool->ops->evict(pool, last_handle);
                        if (ret)
                                goto next;
                }
next:
                spin_lock(&pool->lock);
                clear_bit(UNDER_RECLAIM, &page->private);
                if ((test_bit(PAGE_HEADLESS, &page->private) && ret == 0) ||
                    (zhdr->first_chunks == 0 && zhdr->last_chunks == 0 &&
                     zhdr->middle_chunks == 0)) {
                        /*
                         * All buddies are now free, free the z3fold page and
                         * return success.
                         */
                        clear_bit(PAGE_HEADLESS, &page->private);
                        free_z3fold_page(zhdr);
                        atomic64_dec(&pool->pages_nr);
                        spin_unlock(&pool->lock);
                        return 0;
                } else if (!test_bit(PAGE_HEADLESS, &page->private)) {
                        if (zhdr->first_chunks != 0 &&
                            zhdr->last_chunks != 0 &&
                            zhdr->middle_chunks != 0) {
                                /* Full, add to buddied list */
                                list_add(&zhdr->buddy, &pool->buddied);
                        } else {
                                z3fold_compact_page(zhdr);
                                /* add to unbuddied list */
                                freechunks = num_free_chunks(zhdr);
                                list_add(&zhdr->buddy,
                                         &pool->unbuddied[freechunks]);
                        }
                }

                /* add to beginning of LRU */
                list_add(&page->lru, &pool->lru);
        }
        spin_unlock(&pool->lock);
        return -EAGAIN;
}
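
/*
 * A minimal sketch of an eviction handler that follows the contract above
 * (illustrative only; my_evict and copy_back are hypothetical names, and the
 * zpool indirection used by real callers such as zswap is omitted):
 *
 *        static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *        {
 *                void *src = z3fold_map(pool, handle);
 *                int err = copy_back(src);        // store the data elsewhere
 *
 *                z3fold_unmap(pool, handle);
 *                if (err)
 *                        return err;        // non-zero: the page is requeued
 *                z3fold_free(pool, handle); // must precede returning 0
 *                return 0;
 *        }
 */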

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool: pool in which the allocation resides
 * @handle: handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        void *addr;
        enum buddy buddy;

        spin_lock(&pool->lock);
        zhdr = handle_to_z3fold_header(handle);
        addr = zhdr;
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                goto out;

        buddy = handle_to_buddy(handle);
        switch (buddy) {
        case FIRST:
                addr += ZHDR_SIZE_ALIGNED;
                break;
        case MIDDLE:
                addr += zhdr->start_middle << CHUNK_SHIFT;
                set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
                break;
        case LAST:
                addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
                break;
        default:
                pr_err("unknown buddy id %d\n", buddy);
                WARN_ON(1);
                addr = NULL;
                break;
        }
out:
        spin_unlock(&pool->lock);
        return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool: pool in which the allocation resides
 * @handle: handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy buddy;

        spin_lock(&pool->lock);
        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private)) {
                spin_unlock(&pool->lock);
                return;
        }

        buddy = handle_to_buddy(handle);
        if (buddy == MIDDLE)
                clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        spin_unlock(&pool->lock);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool: pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
        return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
        if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
                return pool->zpool_ops->evict(pool->zpool, handle);
        else
                return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
        .evict = z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
                                 const struct zpool_ops *zpool_ops,
                                 struct zpool *zpool)
{
        struct z3fold_pool *pool;

        pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);
        if (pool) {
                pool->zpool = zpool;
                pool->zpool_ops = zpool_ops;
        }
        return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
        z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
                               unsigned long *handle)
{
        return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
        z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
                               unsigned int *reclaimed)
{
        unsigned int total = 0;
        int ret = -EINVAL;

        while (total < pages) {
                ret = z3fold_reclaim_page(pool, 8);
                if (ret < 0)
                        break;
                total++;
        }

        if (reclaimed)
                *reclaimed = total;

        return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
                              enum zpool_mapmode mm)
{
        return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
        z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
        return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
        .type = "z3fold",
        .owner = THIS_MODULE,
        .create = z3fold_zpool_create,
        .destroy = z3fold_zpool_destroy,
        .malloc = z3fold_zpool_malloc,
        .free = z3fold_zpool_free,
        .shrink = z3fold_zpool_shrink,
        .map = z3fold_zpool_map,
        .unmap = z3fold_zpool_unmap,
        .total_size = z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
        /* Make sure the z3fold header is not larger than the page size */
        BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
        zpool_register_driver(&z3fold_zpool_driver);

        return 0;
}

static void __exit exit_z3fold(void)
{
        zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");