// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * also be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

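/*
 * Worked example of the chunk arithmetic above (illustrative only,
 * assuming PAGE_SIZE == 4096, i.e. PAGE_SHIFT == 12): CHUNK_SHIFT is
 * 12 - 6 == 6, so CHUNK_SIZE == 64 bytes and TOTAL_CHUNKS == 64. If
 * struct z3fold_header rounds up to a single 64-byte chunk, then
 * ZHDR_CHUNKS == 1 and NCHUNKS == 63; a larger (e.g. debug-enabled)
 * header takes a second chunk and NCHUNKS drops to 62.
 */
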
/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	the number of handles not local to this page
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain two
 *		or fewer buddies; the list each z3fold page is added to depends
 *		on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user-defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
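
/*
 * For instance, with 64-byte chunks (CHUNK_SIZE == 64, an assumption that
 * holds for 4K pages): size_to_chunks(1) == 1, size_to_chunks(64) == 1 and
 * size_to_chunks(65) == 2 -- allocation sizes round up to whole chunks.
 */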

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_zalloc(pool->c_handle,
				  (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked)
				break;
			cpu_relax();
		} while (true);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int __init z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	memset(zhdr, 0, sizeof(*zhdr));
	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

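/*
 * A sketch of the resulting encoding (inferred from the helpers above and
 * below, not a normative layout): a non-headless handle is the address of
 * a slot inside a SLOTS_ALIGN-aligned z3fold_buddy_slots, so
 * handle_to_slots() recovers the slots structure by masking the low bits.
 * The slot value itself is the (page-aligned) z3fold header address plus
 * the buddy index in the low bits, and for a LAST buddy the size in chunks
 * is additionally stashed above BUDDY_SHIFT, where handle_to_chunks()
 * below can read it back.
 */
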
/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);

	atomic64_dec(&pool->pages_nr);
}

static void release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

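/*
 * Worked example with hypothetical numbers (assuming TOTAL_CHUNKS == 64
 * and ZHDR_CHUNKS == 1, i.e. NCHUNKS == 63): a page holding only a middle
 * buddy of 10 chunks at start_middle == 20 has nfree_before == 19 and
 * nfree_after == 64 - 30 == 34, so num_free_chunks() reports 34 -- the
 * larger hole, since the two holes can't be merged without moving the
 * middle object. With first_chunks == 5, last_chunks == 8 and no middle
 * buddy, the result is simply 63 - 5 - 8 == 50.
 */
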
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}

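/*
 * Note the asymmetry in get_free_buddy() (an observation, not a spec):
 * once a middle buddy exists, a new object can only go FIRST (and only if
 * it fits in the gap before start_middle) or LAST; MIDDLE is handed out
 * only on pages whose first and last buddies are both in use, because
 * start_middle is derived from first_chunks at allocation time. HEADLESS
 * doubles as the "no suitable buddy" answer here.
 */
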
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				   unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);
	}
	return NULL;
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots)
		zhdr->slots = alloc_slots(pool,
					can_sleep ? GFP_NOIO : GFP_ATOMIC);
	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (!kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0. The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))
		return;
	if (page_claimed) {
		/* the page has not been claimed by us */
		put_z3fold_header(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		put_z3fold_header(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

	rwlock_init(&slots.lock);
	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private)) {
				/*
				 * For non-headless pages, we wait to do this
				 * until we have the page lock to avoid racing
				 * with __z3fold_alloc(). Headless pages don't
				 * have a lock (and __z3fold_alloc() will never
				 * see them), but we still need to test and set
				 * PAGE_CLAIMED to avoid racing with
				 * z3fold_free(), so just do it now before
				 * leaving the loop.
				 */
				if (test_and_set_bit(PAGE_CLAIMED, &page->private))
					continue;

				break;
			}

			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
				zhdr = NULL;
				break;
			}
			if (!z3fold_page_trylock(zhdr)) {
				kref_put(&zhdr->refcount, release_z3fold_page);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}

			/* test_and_set_bit is of course atomic, but we still
			 * need to do it under page lock, otherwise checking
			 * that bit in __z3fold_alloc wouldn't make sense
			 */
			if (zhdr->foreign_handles ||
			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				if (!kref_put(&zhdr->refcount,
						release_z3fold_page_locked))
					z3fold_page_unlock(zhdr);
				zhdr = NULL;
				continue; /* can't evict such page */
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			memset(slots.slot, 0, sizeof(slots.slot));
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}
		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			struct z3fold_buddy_slots *slots = zhdr->slots;

			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				kmem_cache_free(pool->c_handle, slots);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

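/*
 * A minimal sketch of the eviction contract documented above (purely
 * illustrative -- the handler name and the writeback step are
 * hypothetical, not part of this file):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *data = z3fold_map(pool, handle);
 *		... write the object back / decompress it elsewhere ...
 *		z3fold_unmap(pool, handle);
 *		z3fold_free(pool, handle);	(mandatory on success)
 *		return 0;			(non-zero: not evictable)
 *	}
 *
 * In this file the registered handler is z3fold_zpool_evict(), which
 * forwards to the zpool user's evict callback.
 */
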
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		z3fold_page_unlock(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	clear_bit(PAGE_CLAIMED, &page->private);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.sleep_mapped = true,
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/*
	 * Make sure the z3fold header is not larger than the page size and
	 * that there is remaining space for its buddies.
	 */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");