// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/zpool.h>
#include <linux/magic.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * likewise be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

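/*
 * A worked example of the chunk math above, assuming 4 KiB pages and a
 * z3fold_header that fits in one chunk (no CONFIG_DEBUG_SPINLOCK):
 * CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes and TOTAL_CHUNKS = 64;
 * ZHDR_SIZE_ALIGNED = 64, so ZHDR_CHUNKS = 1 and NCHUNKS = 63.
 */
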
/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)
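
/*
 * Note: slots structures are allocated SLOTS_ALIGN (64-byte) aligned from
 * the "z3fold_handle" kmem cache created in z3fold_create_pool(). A handle
 * is a pointer to one of the slot[] entries, so handle_to_slots() below can
 * recover the containing structure by masking off the low six bits.
 */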

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain two
 *		or fewer buddies; the list each z3fold page is added to depends
 *		on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 * @isolate_wait: wait queue used by pool destruction to wait until all
 *		isolated pages are put back
 * @destroying:	bool to stop migration once we start destruction
 * @isolated:	int to count the number of pages currently in isolation
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct wait_queue_head isolate_wait;
	struct work_struct work;
	struct inode *inode;
	bool destroying;
	int isolated;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
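
/*
 * For example, with 4 KiB pages (CHUNK_SIZE == 64), size_to_chunks(100)
 * rounds up to (100 + 63) >> 6 == 2 chunks.
 */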

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_alloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}

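/*
 * A worked example of the encoding above: since zhdr is page aligned, the
 * low bits of the stored value are free for metadata. With first_num == 1
 * and bud == LAST, idx = (3 + 1) & BUDDY_MASK = 0, so slot[0] holds the
 * zhdr address with idx in the two lowest bits and, for LAST, last_chunks
 * encoded starting at bit BUDDY_SHIFT. The handle handed out is &slot[0]
 * itself, which is what lets compaction and migration later rewrite the
 * slot contents without invalidating outstanding handles.
 */
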
/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

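/*
 * For example (4 KiB pages, TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1): a page
 * with first_chunks == 0, start_middle == 10, middle_chunks == 20 and
 * last_chunks == 0 has nfree_before == 10 - 1 == 9 and
 * nfree_after == 64 - (10 + 20) == 34, so num_free_chunks() returns 34.
 */
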
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
	    zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

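/*
 * Note on the get_cpu_ptr()/put_cpu_ptr() pairing above: it disables
 * preemption while the page is inserted, so smp_processor_id() is stable
 * and zhdr->cpu matches the per-CPU unbuddied array the page went into.
 */
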
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

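/*
 * Why first_num is incremented when the middle buddy moves to the front:
 * handle_to_buddy() computes (addr - zhdr->first_num) & BUDDY_MASK, and the
 * low bits of a stored handle value contain idx = (bud + first_num) from
 * encoding time. A MIDDLE handle encoded with idx = first_num + 2 therefore
 * decodes as (idx - (first_num + 1)) & BUDDY_MASK = 1 = FIRST after the
 * bump, matching the object's new position without rewriting the handle.
 */
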
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	init_waitqueue_head(&pool->isolate_wait);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

static bool pool_isolated_are_drained(struct z3fold_pool *pool)
{
	bool ret;

	spin_lock(&pool->lock);
	ret = pool->isolated == 0;
	spin_unlock(&pool->lock);
	return ret;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);
	/*
	 * We set pool->destroying under lock to ensure that
	 * z3fold_page_isolate() sees any changes to destroying. This way we
	 * avoid the need for any memory barriers.
	 */

	spin_lock(&pool->lock);
	pool->destroying = true;
	spin_unlock(&pool->lock);

	/*
	 * We need to ensure that no pages are being migrated while we destroy
	 * these workqueues, as migration can queue work on either of the
	 * workqueues.
	 */
	wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

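/*
 * For example, assuming 4 KiB pages and ZHDR_SIZE_ALIGNED == 64: requests
 * larger than 4096 - 64 - 64 == 3968 bytes cannot share the page with
 * another buddy, so they are stored HEADLESS: the object gets the whole
 * page, and the handle is the page address itself with the PAGE_HEADLESS
 * bit set (see encode_handle()).
 */
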
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

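/*
 * Summary of the non-headless free paths above: if the reference count
 * drops to zero, the page is released outright; if reclaim has claimed the
 * page (PAGE_CLAIMED), the free path backs off and leaves the page to
 * reclaim; otherwise the page is handed to the compaction machinery,
 * inline via do_compact_page() when its home CPU is gone, or else on that
 * CPU's compact workqueue.
 */
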
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			if (unlikely(PageIsolated(page)))
				continue;
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

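/*
 * A minimal sketch of an eviction handler honoring the contract described
 * above, for illustration only (the in-tree user goes through the zpool
 * evict callback further below); my_writeback() is a hypothetical stand-in
 * for the user's backing store:
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		if (my_writeback(pool, handle))
 *			return -EAGAIN;	// keep the page, try another one
 *		z3fold_free(pool, handle);
 *		return 0;		// evicted; page may now be freed
 *	}
 */
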
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}

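/*
 * Mapping arithmetic, by example (4 KiB pages, CHUNK_SIZE == 64): a FIRST
 * buddy always starts right after the header, at zhdr + ZHDR_SIZE_ALIGNED;
 * a LAST buddy of 10 chunks starts at zhdr + 4096 - 640, i.e. it is packed
 * against the end of the page, which is why its size is carried in the
 * handle (see handle_to_chunks()) rather than derived from a start offset.
 */
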
/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

/*
 * z3fold_dec_isolated() expects to be called while pool->lock is held.
 */
static void z3fold_dec_isolated(struct z3fold_pool *pool)
{
	assert_spin_locked(&pool->lock);
	VM_BUG_ON(pool->isolated <= 0);
	pool->isolated--;

	/*
	 * If we have no more isolated pages, we have to see if
	 * z3fold_destroy_pool() is waiting for a signal.
	 */
	if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
		wake_up_all(&pool->isolate_wait);
}

static void z3fold_inc_isolated(struct z3fold_pool *pool)
{
	pool->isolated++;
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		/*
		 * We need to check for destruction while holding pool->lock,
		 * as otherwise destruction could see 0 isolated pages, and
		 * proceed.
		 */
		if (unlikely(pool->destroying)) {
			spin_unlock(&pool->lock);
			/*
			 * If this page isn't stale, somebody else holds a
			 * reference to it. Let's drop our refcount so that
			 * they can call the release logic.
			 */
			if (unlikely(kref_put(&zhdr->refcount,
					      release_z3fold_page_locked))) {
				/*
				 * If we get here we have kref problems, so we
				 * should freak out.
				 */
				WARN(1, "Z3fold is experiencing kref problems\n");
				z3fold_page_unlock(zhdr);
				return false;
			}
			z3fold_page_unlock(zhdr);
			return false;
		}

		z3fold_inc_isolated(pool);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr)) {
		return -EAGAIN;
	}
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	spin_lock(&pool->lock);
	z3fold_dec_isolated(pool);
	spin_unlock(&pool->lock);

	page_mapcount_reset(page);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		spin_lock(&pool->lock);
		z3fold_dec_isolated(pool);
		spin_unlock(&pool->lock);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	z3fold_dec_isolated(pool);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

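/*
 * A minimal sketch of how a zpool user drives this driver through the
 * generic zpool API (illustration only; the pool name, my_zpool_ops, src
 * and len are made up, and error handling is elided):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *
 *	if (!zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *		memcpy(dst, src, len);
 *		zpool_unmap_handle(zp, handle);
 *	}
 *	...
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */
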
MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");