mm/z3fold: remove confusing local variable l reassignment
// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * in the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which is
 * the maximum number of free chunks in a z3fold page; there will likewise be
 * 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

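/*
 * Worked example of the chunk geometry (illustrative; assumes 4K pages,
 * i.e. PAGE_SHIFT == 12 -- other page sizes scale accordingly):
 *
 *	CHUNK_SHIFT  = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *	TOTAL_CHUNKS = 4096 >> 6  = 64 chunks per page
 *	ZHDR_SIZE_ALIGNED is sizeof(struct z3fold_header) rounded up to a
 *	multiple of 64; per the comment above that is one chunk (two when
 *	spinlock debugging bloats the header), so NCHUNKS = 64 - 1 = 63
 *	(or 62) chunks remain for the buddies.
 */
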
/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2
 *		or fewer buddies; the list each z3fold page is added to
 *		depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

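/*
 * Example (illustrative, again assuming 64-byte chunks on a 4K page):
 * size_to_chunks(100) = (100 + 63) >> 6 = 2, i.e. a 100-byte object
 * occupies two chunks, rounding the request up to the chunk granularity.
 */
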
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_zalloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

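/*
 * Note on the masking above (a sketch of the invariant, not new logic):
 * z3fold_buddy_slots objects come from a kmem cache created with
 * SLOTS_ALIGN (0x40) alignment, so a handle -- which points at one of the
 * slot[] words inside such an object -- can be converted back to the
 * enclosing object simply by clearing the low 6 bits. Likewise the low
 * HANDLE_FLAG_MASK bits of slots->pool are free to carry flags such as
 * HANDLES_NOFREE, hence the ~HANDLE_FLAG_MASK in slots_to_pool().
 */
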
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked)
				break;
			cpu_relax();
		} while (true);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int __init z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	memset(zhdr, 0, sizeof(*zhdr));
	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

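/*
 * Example: with first_num == 2, __idx(zhdr, FIRST) = (1 + 2) & 0x3 = 3 and
 * __idx(zhdr, LAST) = (3 + 2) & 0x3 = 1, i.e. the slot indices simply
 * rotate with first_num; handle_to_buddy() below undoes this rotation.
 */
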
/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

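/*
 * Layout of an encoded (non-headless) slot value, as written above: the
 * page-aligned zhdr address in the high bits, the buddy index in the low
 * BUDDY_MASK bits, and -- for a LAST buddy only -- last_chunks stashed at
 * BUDDY_SHIFT so handle_to_chunks() can recover the object size without
 * touching the header. The handle returned to the caller is the address
 * of the slot word itself, which is what get_z3fold_header() dereferences.
 */
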
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

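/*
 * Continuing the __idx() example (first_num == 2, LAST stored at index 1):
 * the slot value's low bits hold idx = 1, so (1 - 2) & 0x3 = 3 == LAST.
 * The unsigned wrap-around is exactly why the "< first_num" case noted
 * above is harmless.
 */
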
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

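/*
 * Worked example (illustrative, using the 64-chunk geometry from above):
 * a page with first_chunks = 10, a middle object of 5 chunks starting at
 * start_middle = 20 and no last object gives nfree_before = 0 (first buddy
 * in use) and nfree_after = 64 - (20 + 5) = 39, so num_free_chunks()
 * reports 39 -- the largest hole, not the sum of both holes, since a new
 * buddy must be contiguous.
 */
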
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}

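/*
 * In other words (a summary of the decision tree above): HEADLESS doubles
 * as the "no fit" return value here. With a middle buddy present the
 * request can only go before it (FIRST, if the gap is big enough) or after
 * it (LAST); without one, FIRST and LAST are preferred and MIDDLE is the
 * fallback, since a middle object is the one that may later have to be
 * memmoved by the compaction code below.
 */
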
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

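/*
 * mchunk_memmove() only moves the payload; callers are expected to update
 * zhdr->start_middle themselves afterwards. E.g. compacting a middle
 * object at start_middle = 20 down to dst_chunk = ZHDR_CHUNKS copies
 * middle_chunks << CHUNK_SHIFT bytes to just past the header (chunk
 * numbers are illustrative).
 */
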
static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr) {
		if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			add_to_unbuddied(pool, new_zhdr);
			z3fold_page_unlock(new_zhdr);
		}
	}
	return NULL;

}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

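/*
 * Example of the BIG_CHUNK_GAP heuristic (numbers illustrative): with
 * first_chunks = 10, ZHDR_CHUNKS = 1 and start_middle = 20, the gap is
 * 20 - (10 + 1) = 9 >= BIG_CHUNK_GAP, so the middle object is pulled down
 * to chunk 11 and the two small holes merge into one 9-chunk hole after
 * the middle object. A gap of only 1 or 2 chunks would be left alone, as
 * the memmove would cost more than the defragmentation gains.
 */
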
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots)
		zhdr->slots = alloc_slots(pool,
					can_sleep ? GFP_NOIO : GFP_ATOMIC);
	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

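/*
 * A minimal usage sketch of the allocation API above (illustrative only;
 * in-tree users go through the zpool wrappers at the bottom of this file
 * rather than calling these static functions directly, and "data"/"len"
 * are hypothetical):
 *
 *	unsigned long handle;
 *	void *obj;
 *
 *	if (z3fold_alloc(pool, len, GFP_KERNEL, &handle) == 0) {
 *		obj = z3fold_map(pool, handle);
 *		memcpy(obj, data, len);	// populate the compressed object
 *		z3fold_unmap(pool, handle);
 *		...
 *		z3fold_free(pool, handle);
 *	}
 */
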
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0. The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (page_claimed) {
		/* the page has not been claimed by us */
		put_z3fold_header(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		put_z3fold_header(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

	rwlock_init(&slots.lock);
	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private)) {
				/*
				 * For non-headless pages, we wait to do this
				 * until we have the page lock to avoid racing
				 * with __z3fold_alloc(). Headless pages don't
				 * have a lock (and __z3fold_alloc() will never
				 * see them), but we still need to test and set
				 * PAGE_CLAIMED to avoid racing with
				 * z3fold_free(), so just do it now before
				 * leaving the loop.
				 */
				if (test_and_set_bit(PAGE_CLAIMED, &page->private))
					continue;

				break;
			}

			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
				zhdr = NULL;
				break;
			}
			if (!z3fold_page_trylock(zhdr)) {
				if (kref_put(&zhdr->refcount,
						release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}

			/* test_and_set_bit is of course atomic, but we still
			 * need to do it under page lock, otherwise checking
			 * that bit in __z3fold_alloc wouldn't make sense
			 */
			if (zhdr->foreign_handles ||
			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				if (kref_put(&zhdr->refcount,
						release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				zhdr = NULL;
				continue; /* can't evict such page */
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			memset(slots.slot, 0, sizeof(slots.slot));
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}
		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			struct z3fold_buddy_slots *slots = zhdr->slots;
			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				kmem_cache_free(pool->c_handle, slots);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		z3fold_page_unlock(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	clear_bit(PAGE_CLAIMED, &page->private);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.sleep_mapped = true,
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/*
	 * Make sure the z3fold header is not larger than the page size and
	 * there is remaining space for its buddies.
	 */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");