/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
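/*
 * Illustrative sketch (not part of this file's code): since z3fold is
 * only reachable through the zpool layer, a client would typically use
 * it roughly as follows. The calls below are the generic zpool API;
 * "my_ops", "data" and "size" are hypothetical, and error handling is
 * elided.
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "demo", GFP_KERNEL,
 *					     &my_ops);
 *	unsigned long handle;
 *
 *	if (zpool_malloc(zp, size, GFP_KERNEL, &handle) == 0) {
 *		void *obj = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(obj, data, size);
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	zpool_destroy_pool(zp);
 */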

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/dcache.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 if CONFIG_DEBUG_SPINLOCK=y), which is
 * the maximum number of free chunks in a z3fold page; there will likewise
 * be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
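/*
 * Worked example (added for clarity): with 4K pages,
 *
 *	CHUNK_SHIFT  = 12 - 6 = 6
 *	CHUNK_SIZE   = 1 << 6 = 64 bytes
 *	TOTAL_CHUNKS = 4096 >> 6 = 64
 *
 * ZHDR_SIZE_ALIGNED rounds the header up to a whole number of chunks
 * (ZHDR_CHUNKS of them) and NCHUNKS is what remains for payload; e.g. a
 * header occupying one 64-byte chunk leaves NCHUNKS = 63.
 */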

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)

/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
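/*
 * Example (added for clarity): with 4K pages CHUNK_SIZE is 64, so
 * size_to_chunks(100) = (100 + 63) >> 6 = 2 chunks, i.e. allocation
 * sizes are rounded up to the next 64-byte boundary.
 */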

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
{
	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
							    GFP_KERNEL);

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

static struct dentry *z3fold_do_mount(struct file_system_type *fs_type,
			int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname = simple_dname,
	};

	return mount_pseudo(fs_type, "z3fold:", NULL, &ops, 0x33);
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.mount		= z3fold_do_mount,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}
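/*
 * Illustrative note (added for clarity): a non-headless handle is the
 * address of a slot in z3fold_buddy_slots; the value stored there packs
 * the z3fold page address in the upper bits, the buddy index (offset by
 * first_num) in the low BUDDY_SHIFT bits and, for LAST buddies only,
 * last_chunks shifted left by BUDDY_SHIFT. E.g. with first_num == 0, a
 * LAST buddy of 10 chunks yields *handle == zhdr | (10 << 2) | 3.
 */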

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return slots_to_pool(zhdr->slots);
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
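/*
 * Worked example (added for clarity; 4K page, TOTAL_CHUNKS = 64,
 * ZHDR_CHUNKS = 1): a page with first_chunks = 0, a middle buddy of 10
 * chunks at start_middle = 20 and last_chunks = 5 has
 * nfree_before = 20 - 1 = 19 and nfree_after = 0, so num_free_chunks()
 * returns 19 and add_to_unbuddied() below files the page on
 * unbuddied[19].
 */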

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
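/*
 * Illustrative sketch of the second case above (added for clarity):
 * when a middle buddy has at least BIG_CHUNK_GAP free chunks between
 * itself and the first buddy (and no last buddy), it is slid down so
 * that all free space ends up merged at the tail of the page:
 *
 *	before: | hdr | first | gap >= 3 | middle |   free   |
 *	after:  | hdr | first | middle |     free (merged)   |
 */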

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);
	z3fold_unregister_migration(pool);
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	__SetPageMovable(page, pool->inode->i_mapping);
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
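/*
 * Example of the size cutoff above (added for clarity): with 4K pages,
 * 64-byte chunks and a one-chunk header, any request larger than
 * 4096 - 64 - 64 = 3968 bytes leaves no room for a second buddy, so it
 * is served from a HEADLESS page that carries no z3fold header at all.
 */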

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			if (unlikely(PageIsolated(page)))
				continue;
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked, so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
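/*
 * Illustrative sketch of an eviction handler obeying the contract
 * described above (added for clarity; "write_back_somewhere" is a
 * hypothetical helper, and real users such as the zpool layer route
 * this through their own evict callback instead):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *obj = z3fold_map(pool, handle);
 *		int err = write_back_somewhere(obj);
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return -EAGAIN;	// page goes back on the LRU
 *		z3fold_free(pool, handle);	// required on success
 *		return 0;
 *	}
 */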

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!trylock_page(page))
		return -EAGAIN;

	if (!z3fold_page_trylock(zhdr)) {
		unlock_page(page);
		return -EAGAIN;
	}
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		unlock_page(page);
		return -EBUSY;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	unlock_page(page);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");
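/*
 * Usage note (added for clarity): with this driver registered, z3fold
 * can be picked as the zswap allocator, e.g. with zswap.zpool=z3fold
 * on the kernel command line or at runtime via
 * /sys/module/zswap/parameters/zpool.
 */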

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");