1 /*
2  * z3fold.c
3  *
4  * Author: Vitaly Wool <vitaly.wool@konsulko.com>
5  * Copyright (C) 2016, Sony Mobile Communications Inc.
6  *
7  * This implementation is based on zbud written by Seth Jennings.
8  *
9  * z3fold is a special purpose allocator for storing compressed pages. It
10  * can store up to three compressed pages per page, which improves the
11  * compression ratio of zbud while retaining its main concepts (e.g. always
12  * storing an integral number of objects per page) and simplicity.
13  * It still has simple and deterministic reclaim properties that make it
14  * preferable to a higher density approach (with no requirement on an
15  * integral number of objects per page) when reclaim is used.
16  *
17  * As in zbud, pages are divided into "chunks".  The size of the chunks is
18  * fixed at compile time and is determined by NCHUNKS_ORDER below.
19  *
20  * z3fold doesn't export any API and is meant to be used via the zpool API.
21  */
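/*
 * Illustrative sketch (not part of the original driver): how a kernel user
 * would typically reach z3fold through the zpool API, assuming the standard
 * zpool_* helpers from <linux/zpool.h>.  Error handling is abbreviated and
 * the function name is hypothetical.
 */
#if 0
static int z3fold_usage_example(void)
{
	struct zpool *pool;
	unsigned long handle;
	char *buf;

	/* "z3fold" selects this allocator as the zpool backend */
	pool = zpool_create_pool("z3fold", "example", GFP_KERNEL, NULL);
	if (!pool)
		return -ENOMEM;

	/* reserve room for a ~100 byte compressed object, get an opaque handle */
	if (zpool_malloc(pool, 100, GFP_KERNEL, &handle)) {
		zpool_destroy_pool(pool);
		return -ENOMEM;
	}

	/* map the handle to a usable address, fill it, then unmap */
	buf = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
	memset(buf, 0, 100);
	zpool_unmap_handle(pool, handle);

	zpool_free(pool, handle);
	zpool_destroy_pool(pool);
	return 0;
}
#endif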
22
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25 #include <linux/atomic.h>
26 #include <linux/sched.h>
27 #include <linux/cpumask.h>
28 #include <linux/dcache.h>
29 #include <linux/list.h>
30 #include <linux/mm.h>
31 #include <linux/module.h>
32 #include <linux/page-flags.h>
33 #include <linux/migrate.h>
34 #include <linux/node.h>
35 #include <linux/compaction.h>
36 #include <linux/percpu.h>
37 #include <linux/mount.h>
38 #include <linux/fs.h>
39 #include <linux/preempt.h>
40 #include <linux/workqueue.h>
41 #include <linux/slab.h>
42 #include <linux/spinlock.h>
43 #include <linux/zpool.h>
44
45 /*
46  * NCHUNKS_ORDER determines the internal allocation granularity, effectively
47  * adjusting internal fragmentation.  It also determines the number of
48  * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
49  * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
50  * at the beginning of an allocated page are occupied by the z3fold header,
51  * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
52  * which is the maximum number of free chunks in a z3fold page; there will
53  * likewise be 63 (or 62, respectively) freelists per pool.
54  */
55 #define NCHUNKS_ORDER   6
56
57 #define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
58 #define CHUNK_SIZE      (1 << CHUNK_SHIFT)
59 #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
60 #define ZHDR_CHUNKS     (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
61 #define TOTAL_CHUNKS    (PAGE_SIZE >> CHUNK_SHIFT)
62 #define NCHUNKS         ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
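/*
 * Worked example (illustrative): with a 4K PAGE_SIZE and NCHUNKS_ORDER = 6,
 * CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE is 64 bytes and TOTAL_CHUNKS is
 * 4096 >> 6 = 64.  ZHDR_SIZE_ALIGNED rounds the header up to a whole number
 * of chunks, so NCHUNKS = 64 - ZHDR_CHUNKS chunks remain for stored objects.
 */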
63
64 #define BUDDY_MASK      (0x3)
65 #define BUDDY_SHIFT     2
66 #define SLOTS_ALIGN     (0x40)
67
68 /*****************
69  * Structures
70 *****************/
71 struct z3fold_pool;
72 struct z3fold_ops {
73         int (*evict)(struct z3fold_pool *pool, unsigned long handle);
74 };
75
76 enum buddy {
77         HEADLESS = 0,
78         FIRST,
79         MIDDLE,
80         LAST,
81         BUDDIES_MAX = LAST
82 };
83
84 struct z3fold_buddy_slots {
85         /*
86          * We use BUDDY_MASK in handle_to_buddy() etc., so there should
87          * be enough slots to hold all possible variants.
88          */
89         unsigned long slot[BUDDY_MASK + 1];
90         unsigned long pool; /* back link + flags */
91 };
92 #define HANDLE_FLAG_MASK        (0x03)
93
94 /*
95  * struct z3fold_header - z3fold page metadata occupying first chunks of each
96  *                      z3fold page, except for HEADLESS pages
97  * @buddy:              links the z3fold page into the relevant list in the
98  *                      pool
99  * @page_lock:          per-page lock
100  * @refcount:           reference count for the z3fold page
101  * @work:               work_struct for page layout optimization
102  * @slots:              pointer to the structure holding buddy slots
103  * @cpu:                CPU which this page "belongs" to
104  * @first_chunks:       the size of the first buddy in chunks, 0 if free
105  * @middle_chunks:      the size of the middle buddy in chunks, 0 if free
106  * @last_chunks:        the size of the last buddy in chunks, 0 if free
 * @start_middle:       the first chunk index of the middle buddy
107  * @first_num:          the starting number (for the first handle)
108  * @mapped_count:       the number of objects currently mapped
109  */
110 struct z3fold_header {
111         struct list_head buddy;
112         spinlock_t page_lock;
113         struct kref refcount;
114         struct work_struct work;
115         struct z3fold_buddy_slots *slots;
116         short cpu;
117         unsigned short first_chunks;
118         unsigned short middle_chunks;
119         unsigned short last_chunks;
120         unsigned short start_middle;
121         unsigned short first_num:2;
122         unsigned short mapped_count:2;
123 };
124
125 /**
126  * struct z3fold_pool - stores metadata for each z3fold pool
127  * @name:       pool name
128  * @lock:       protects pool unbuddied/lru lists
129  * @stale_lock: protects pool stale page list
130  * @unbuddied:  per-cpu array of lists tracking z3fold pages that contain 2-
131  *              buddies; the list each z3fold page is added to depends on
132  *              the size of its free region.
133  * @lru:        list tracking the z3fold pages in LRU order by most recently
134  *              added buddy.
135  * @stale:      list of pages marked for freeing
136  * @pages_nr:   number of z3fold pages in the pool.
137  * @c_handle:   cache for z3fold_buddy_slots allocation
138  * @ops:        pointer to a structure of user defined operations specified at
139  *              pool creation time.
 * @zpool:       zpool driver
 * @zpool_ops:   zpool operations structure with an evict callback
140  * @compact_wq: workqueue for page layout background optimization
141  * @release_wq: workqueue for safe page release
142  * @work:       work_struct for safe page release
143  * @inode:      inode for z3fold pseudo filesystem
144  *
145  * This structure is allocated at pool creation time and maintains metadata
146  * pertaining to a particular z3fold pool.
147  */
148 struct z3fold_pool {
149         const char *name;
150         spinlock_t lock;
151         spinlock_t stale_lock;
152         struct list_head *unbuddied;
153         struct list_head lru;
154         struct list_head stale;
155         atomic64_t pages_nr;
156         struct kmem_cache *c_handle;
157         const struct z3fold_ops *ops;
158         struct zpool *zpool;
159         const struct zpool_ops *zpool_ops;
160         struct workqueue_struct *compact_wq;
161         struct workqueue_struct *release_wq;
162         struct work_struct work;
163         struct inode *inode;
164 };
165
166 /*
167  * Internal z3fold page flags
168  */
169 enum z3fold_page_flags {
170         PAGE_HEADLESS = 0,
171         MIDDLE_CHUNK_MAPPED,
172         NEEDS_COMPACTING,
173         PAGE_STALE,
174         PAGE_CLAIMED, /* by either reclaim or free */
175 };
176
177 /*****************
178  * Helpers
179 *****************/
180
181 /* Converts an allocation size in bytes to size in z3fold chunks */
182 static int size_to_chunks(size_t size)
183 {
184         return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
185 }
186
187 #define for_each_unbuddied_list(_iter, _begin) \
188         for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
189
190 static void compact_page_work(struct work_struct *w);
191
192 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
193 {
194         struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
195                                                         GFP_KERNEL);
196
197         if (slots) {
198                 memset(slots->slot, 0, sizeof(slots->slot));
199                 slots->pool = (unsigned long)pool;
200         }
201
202         return slots;
203 }
204
205 static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
206 {
207         return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
208 }
209
210 static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
211 {
212         return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
213 }
214
215 static inline void free_handle(unsigned long handle)
216 {
217         struct z3fold_buddy_slots *slots;
218         int i;
219         bool is_free;
220
221         if (handle & (1 << PAGE_HEADLESS))
222                 return;
223
224         WARN_ON(*(unsigned long *)handle == 0);
225         *(unsigned long *)handle = 0;
226         slots = handle_to_slots(handle);
227         is_free = true;
228         for (i = 0; i <= BUDDY_MASK; i++) {
229                 if (slots->slot[i]) {
230                         is_free = false;
231                         break;
232                 }
233         }
234
235         if (is_free) {
236                 struct z3fold_pool *pool = slots_to_pool(slots);
237
238                 kmem_cache_free(pool->c_handle, slots);
239         }
240 }
241
242 static struct dentry *z3fold_do_mount(struct file_system_type *fs_type,
243                                 int flags, const char *dev_name, void *data)
244 {
245         return mount_pseudo(fs_type, "z3fold:", NULL, NULL, 0x33);
246 }
247
248 static struct file_system_type z3fold_fs = {
249         .name           = "z3fold",
250         .mount          = z3fold_do_mount,
251         .kill_sb        = kill_anon_super,
252 };
253
254 static struct vfsmount *z3fold_mnt;
255 static int z3fold_mount(void)
256 {
257         int ret = 0;
258
259         z3fold_mnt = kern_mount(&z3fold_fs);
260         if (IS_ERR(z3fold_mnt))
261                 ret = PTR_ERR(z3fold_mnt);
262
263         return ret;
264 }
265
266 static void z3fold_unmount(void)
267 {
268         kern_unmount(z3fold_mnt);
269 }
270
271 static const struct address_space_operations z3fold_aops;
272 static int z3fold_register_migration(struct z3fold_pool *pool)
273 {
274         pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
275         if (IS_ERR(pool->inode)) {
276                 pool->inode = NULL;
277                 return 1;
278         }
279
280         pool->inode->i_mapping->private_data = pool;
281         pool->inode->i_mapping->a_ops = &z3fold_aops;
282         return 0;
283 }
284
285 static void z3fold_unregister_migration(struct z3fold_pool *pool)
286 {
287         if (pool->inode)
288                 iput(pool->inode);
289 }
290
291 /* Initializes the z3fold header of a newly allocated z3fold page */
292 static struct z3fold_header *init_z3fold_page(struct page *page,
293                                         struct z3fold_pool *pool)
294 {
295         struct z3fold_header *zhdr = page_address(page);
296         struct z3fold_buddy_slots *slots = alloc_slots(pool);
297
298         if (!slots)
299                 return NULL;
300
301         INIT_LIST_HEAD(&page->lru);
302         clear_bit(PAGE_HEADLESS, &page->private);
303         clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
304         clear_bit(NEEDS_COMPACTING, &page->private);
305         clear_bit(PAGE_STALE, &page->private);
306         clear_bit(PAGE_CLAIMED, &page->private);
307
308         spin_lock_init(&zhdr->page_lock);
309         kref_init(&zhdr->refcount);
310         zhdr->first_chunks = 0;
311         zhdr->middle_chunks = 0;
312         zhdr->last_chunks = 0;
313         zhdr->first_num = 0;
314         zhdr->start_middle = 0;
315         zhdr->cpu = -1;
316         zhdr->slots = slots;
317         INIT_LIST_HEAD(&zhdr->buddy);
318         INIT_WORK(&zhdr->work, compact_page_work);
319         return zhdr;
320 }
321
322 /* Resets the struct page fields and frees the page */
323 static void free_z3fold_page(struct page *page, bool headless)
324 {
325         if (!headless) {
326                 lock_page(page);
327                 __ClearPageMovable(page);
328                 unlock_page(page);
329         }
330         ClearPagePrivate(page);
331         __free_page(page);
332 }
333
334 /* Lock a z3fold page */
335 static inline void z3fold_page_lock(struct z3fold_header *zhdr)
336 {
337         spin_lock(&zhdr->page_lock);
338 }
339
340 /* Try to lock a z3fold page */
341 static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
342 {
343         return spin_trylock(&zhdr->page_lock);
344 }
345
346 /* Unlock a z3fold page */
347 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
348 {
349         spin_unlock(&zhdr->page_lock);
350 }
351
352 /* Helper function to build the index */
353 static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
354 {
355         return (bud + zhdr->first_num) & BUDDY_MASK;
356 }
357
358 /*
359  * Encodes the handle of a particular buddy within a z3fold page
360  * Pool lock should be held as this function accesses first_num
361  */
362 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
363 {
364         struct z3fold_buddy_slots *slots;
365         unsigned long h = (unsigned long)zhdr;
366         int idx = 0;
367
368         /*
369          * For a headless page, its handle is its pointer with the extra
370          * PAGE_HEADLESS bit set
371          */
372         if (bud == HEADLESS)
373                 return h | (1 << PAGE_HEADLESS);
374
375         /* otherwise, return pointer to encoded handle */
376         idx = __idx(zhdr, bud);
377         h += idx;
378         if (bud == LAST)
379                 h |= (zhdr->last_chunks << BUDDY_SHIFT);
380
381         slots = zhdr->slots;
382         slots->slot[idx] = h;
383         return (unsigned long)&slots->slot[idx];
384 }
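/*
 * Worked example (illustrative, hypothetical address): for a LAST buddy of
 * 10 chunks in a page whose header sits at 0xffff888012340000 and whose
 * first_num is 1, __idx() yields (LAST + 1) & BUDDY_MASK = 0, so
 * slot[0] = 0xffff888012340000 | (10 << BUDDY_SHIFT) = 0xffff888012340028,
 * and the handle returned is the address of slot[0] itself.
 */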
385
386 /* Returns the z3fold page where a given handle is stored */
387 static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
388 {
389         unsigned long addr = h;
390
391         if (!(addr & (1 << PAGE_HEADLESS)))
392                 addr = *(unsigned long *)h;
393
394         return (struct z3fold_header *)(addr & PAGE_MASK);
395 }
396
397 /* only for LAST bud, returns zero otherwise */
398 static unsigned short handle_to_chunks(unsigned long handle)
399 {
400         unsigned long addr = *(unsigned long *)handle;
401
402         return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
403 }
404
405 /*
406  * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
407  * but that doesn't matter, because the masking will result in the
408  * correct buddy number.
409  */
410 static enum buddy handle_to_buddy(unsigned long handle)
411 {
412         struct z3fold_header *zhdr;
413         unsigned long addr;
414
415         WARN_ON(handle & (1 << PAGE_HEADLESS));
416         addr = *(unsigned long *)handle;
417         zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
418         return (addr - zhdr->first_num) & BUDDY_MASK;
419 }
420
421 static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
422 {
423         return slots_to_pool(zhdr->slots);
424 }
425
426 static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
427 {
428         struct page *page = virt_to_page(zhdr);
429         struct z3fold_pool *pool = zhdr_to_pool(zhdr);
430
431         WARN_ON(!list_empty(&zhdr->buddy));
432         set_bit(PAGE_STALE, &page->private);
433         clear_bit(NEEDS_COMPACTING, &page->private);
434         spin_lock(&pool->lock);
435         if (!list_empty(&page->lru))
436                 list_del_init(&page->lru);
437         spin_unlock(&pool->lock);
438         if (locked)
439                 z3fold_page_unlock(zhdr);
440         spin_lock(&pool->stale_lock);
441         list_add(&zhdr->buddy, &pool->stale);
442         queue_work(pool->release_wq, &pool->work);
443         spin_unlock(&pool->stale_lock);
444 }
445
446 static void __attribute__((__unused__))
447                         release_z3fold_page(struct kref *ref)
448 {
449         struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
450                                                 refcount);
451         __release_z3fold_page(zhdr, false);
452 }
453
454 static void release_z3fold_page_locked(struct kref *ref)
455 {
456         struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
457                                                 refcount);
458         WARN_ON(z3fold_page_trylock(zhdr));
459         __release_z3fold_page(zhdr, true);
460 }
461
462 static void release_z3fold_page_locked_list(struct kref *ref)
463 {
464         struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
465                                                refcount);
466         struct z3fold_pool *pool = zhdr_to_pool(zhdr);
467         spin_lock(&pool->lock);
468         list_del_init(&zhdr->buddy);
469         spin_unlock(&pool->lock);
470
471         WARN_ON(z3fold_page_trylock(zhdr));
472         __release_z3fold_page(zhdr, true);
473 }
474
475 static void free_pages_work(struct work_struct *w)
476 {
477         struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
478
479         spin_lock(&pool->stale_lock);
480         while (!list_empty(&pool->stale)) {
481                 struct z3fold_header *zhdr = list_first_entry(&pool->stale,
482                                                 struct z3fold_header, buddy);
483                 struct page *page = virt_to_page(zhdr);
484
485                 list_del(&zhdr->buddy);
486                 if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
487                         continue;
488                 spin_unlock(&pool->stale_lock);
489                 cancel_work_sync(&zhdr->work);
490                 free_z3fold_page(page, false);
491                 cond_resched();
492                 spin_lock(&pool->stale_lock);
493         }
494         spin_unlock(&pool->stale_lock);
495 }
496
497 /*
498  * Returns the number of free chunks in a z3fold page.
499  * NB: can't be used with HEADLESS pages.
500  */
501 static int num_free_chunks(struct z3fold_header *zhdr)
502 {
503         int nfree;
504         /*
505          * If there is a middle object, pick up the bigger free space
506          * either before or after it. Otherwise just subtract the number
507          * of chunks occupied by the first and the last objects.
508          */
509         if (zhdr->middle_chunks != 0) {
510                 int nfree_before = zhdr->first_chunks ?
511                         0 : zhdr->start_middle - ZHDR_CHUNKS;
512                 int nfree_after = zhdr->last_chunks ?
513                         0 : TOTAL_CHUNKS -
514                                 (zhdr->start_middle + zhdr->middle_chunks);
515                 nfree = max(nfree_before, nfree_after);
516         } else
517                 nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
518         return nfree;
519 }
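/*
 * Worked example (illustrative, assuming TOTAL_CHUNKS = 64, ZHDR_CHUNKS = 1):
 * a page with first_chunks = 0, start_middle = 20, middle_chunks = 10 and
 * last_chunks = 5 has nfree_before = 20 - 1 = 19 and nfree_after = 0 (the
 * last buddy is in use), so num_free_chunks() reports max(19, 0) = 19.
 */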
520
521 /* Add to the appropriate unbuddied list */
522 static inline void add_to_unbuddied(struct z3fold_pool *pool,
523                                 struct z3fold_header *zhdr)
524 {
525         if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
526                         zhdr->middle_chunks == 0) {
527                 struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
528
529                 int freechunks = num_free_chunks(zhdr);
530                 spin_lock(&pool->lock);
531                 list_add(&zhdr->buddy, &unbuddied[freechunks]);
532                 spin_unlock(&pool->lock);
533                 zhdr->cpu = smp_processor_id();
534                 put_cpu_ptr(pool->unbuddied);
535         }
536 }
537
538 static inline void *mchunk_memmove(struct z3fold_header *zhdr,
539                                 unsigned short dst_chunk)
540 {
541         void *beg = zhdr;
542         return memmove(beg + (dst_chunk << CHUNK_SHIFT),
543                        beg + (zhdr->start_middle << CHUNK_SHIFT),
544                        zhdr->middle_chunks << CHUNK_SHIFT);
545 }
546
547 #define BIG_CHUNK_GAP   3
548 /* Has to be called with the page lock held */
549 static int z3fold_compact_page(struct z3fold_header *zhdr)
550 {
551         struct page *page = virt_to_page(zhdr);
552
553         if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
554                 return 0; /* can't move middle chunk, it's used */
555
556         if (unlikely(PageIsolated(page)))
557                 return 0;
558
559         if (zhdr->middle_chunks == 0)
560                 return 0; /* nothing to compact */
561
562         if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
563                 /* move to the beginning */
564                 mchunk_memmove(zhdr, ZHDR_CHUNKS);
565                 zhdr->first_chunks = zhdr->middle_chunks;
566                 zhdr->middle_chunks = 0;
567                 zhdr->start_middle = 0;
568                 zhdr->first_num++;
569                 return 1;
570         }
571
572         /*
573          * moving data is expensive, so let's only do that if
574          * there's substantial gain (at least BIG_CHUNK_GAP chunks)
575          */
576         if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
577             zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
578                         BIG_CHUNK_GAP) {
579                 mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
580                 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
581                 return 1;
582         } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
583                    TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
584                                         + zhdr->middle_chunks) >=
585                         BIG_CHUNK_GAP) {
586                 unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
587                         zhdr->middle_chunks;
588                 mchunk_memmove(zhdr, new_start);
589                 zhdr->start_middle = new_start;
590                 return 1;
591         }
592
593         return 0;
594 }
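/*
 * Worked example (illustrative, assuming ZHDR_CHUNKS = 1): with
 * first_chunks = 10, last_chunks = 0 and start_middle = 20, the gap between
 * the first and the middle buddy is 20 - (10 + 1) = 9 >= BIG_CHUNK_GAP, so
 * the middle buddy is moved down to start at chunk 11.
 */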
595
596 static void do_compact_page(struct z3fold_header *zhdr, bool locked)
597 {
598         struct z3fold_pool *pool = zhdr_to_pool(zhdr);
599         struct page *page;
600
601         page = virt_to_page(zhdr);
602         if (locked)
603                 WARN_ON(z3fold_page_trylock(zhdr));
604         else
605                 z3fold_page_lock(zhdr);
606         if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
607                 z3fold_page_unlock(zhdr);
608                 return;
609         }
610         spin_lock(&pool->lock);
611         list_del_init(&zhdr->buddy);
612         spin_unlock(&pool->lock);
613
614         if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
615                 atomic64_dec(&pool->pages_nr);
616                 return;
617         }
618
619         if (unlikely(PageIsolated(page) ||
620                      test_bit(PAGE_STALE, &page->private))) {
621                 z3fold_page_unlock(zhdr);
622                 return;
623         }
624
625         z3fold_compact_page(zhdr);
626         add_to_unbuddied(pool, zhdr);
627         z3fold_page_unlock(zhdr);
628 }
629
630 static void compact_page_work(struct work_struct *w)
631 {
632         struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
633                                                 work);
634
635         do_compact_page(zhdr, false);
636 }
637
638 /* returns _locked_ z3fold page header or NULL */
639 static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
640                                                 size_t size, bool can_sleep)
641 {
642         struct z3fold_header *zhdr = NULL;
643         struct page *page;
644         struct list_head *unbuddied;
645         int chunks = size_to_chunks(size), i;
646
647 lookup:
648         /* First, try to find an unbuddied z3fold page. */
649         unbuddied = get_cpu_ptr(pool->unbuddied);
650         for_each_unbuddied_list(i, chunks) {
651                 struct list_head *l = &unbuddied[i];
652
653                 zhdr = list_first_entry_or_null(READ_ONCE(l),
654                                         struct z3fold_header, buddy);
655
656                 if (!zhdr)
657                         continue;
658
659                 /* Re-check under lock. */
660                 spin_lock(&pool->lock);
661                 l = &unbuddied[i];
662                 if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
663                                                 struct z3fold_header, buddy)) ||
664                     !z3fold_page_trylock(zhdr)) {
665                         spin_unlock(&pool->lock);
666                         zhdr = NULL;
667                         put_cpu_ptr(pool->unbuddied);
668                         if (can_sleep)
669                                 cond_resched();
670                         goto lookup;
671                 }
672                 list_del_init(&zhdr->buddy);
673                 zhdr->cpu = -1;
674                 spin_unlock(&pool->lock);
675
676                 page = virt_to_page(zhdr);
677                 if (test_bit(NEEDS_COMPACTING, &page->private)) {
678                         z3fold_page_unlock(zhdr);
679                         zhdr = NULL;
680                         put_cpu_ptr(pool->unbuddied);
681                         if (can_sleep)
682                                 cond_resched();
683                         goto lookup;
684                 }
685
686                 /*
687                  * This page could not be removed from its unbuddied
688                  * list while the pool lock was held, and we have taken
689                  * the page lock since, so kref_put could not have been
690                  * called before we got here; it is safe to just call kref_get().
691                  */
692                 kref_get(&zhdr->refcount);
693                 break;
694         }
695         put_cpu_ptr(pool->unbuddied);
696
697         if (!zhdr) {
698                 int cpu;
699
700                 /* look for _exact_ match on other cpus' lists */
701                 for_each_online_cpu(cpu) {
702                         struct list_head *l;
703
704                         unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
705                         spin_lock(&pool->lock);
706                         l = &unbuddied[chunks];
707
708                         zhdr = list_first_entry_or_null(READ_ONCE(l),
709                                                 struct z3fold_header, buddy);
710
711                         if (!zhdr || !z3fold_page_trylock(zhdr)) {
712                                 spin_unlock(&pool->lock);
713                                 zhdr = NULL;
714                                 continue;
715                         }
716                         list_del_init(&zhdr->buddy);
717                         zhdr->cpu = -1;
718                         spin_unlock(&pool->lock);
719
720                         page = virt_to_page(zhdr);
721                         if (test_bit(NEEDS_COMPACTING, &page->private)) {
722                                 z3fold_page_unlock(zhdr);
723                                 zhdr = NULL;
724                                 if (can_sleep)
725                                         cond_resched();
726                                 continue;
727                         }
728                         kref_get(&zhdr->refcount);
729                         break;
730                 }
731         }
732
733         return zhdr;
734 }
735
736 /*
737  * API Functions
738  */
739
740 /**
741  * z3fold_create_pool() - create a new z3fold pool
742  * @name:       pool name
743  * @gfp:        gfp flags when allocating the z3fold pool structure
744  * @ops:        user-defined operations for the z3fold pool
745  *
746  * Return: pointer to the new z3fold pool or NULL if the metadata allocation
747  * failed.
748  */
749 static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
750                 const struct z3fold_ops *ops)
751 {
752         struct z3fold_pool *pool = NULL;
753         int i, cpu;
754
755         pool = kzalloc(sizeof(struct z3fold_pool), gfp);
756         if (!pool)
757                 goto out;
758         pool->c_handle = kmem_cache_create("z3fold_handle",
759                                 sizeof(struct z3fold_buddy_slots),
760                                 SLOTS_ALIGN, 0, NULL);
761         if (!pool->c_handle)
762                 goto out_c;
763         spin_lock_init(&pool->lock);
764         spin_lock_init(&pool->stale_lock);
765         pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
766         if (!pool->unbuddied)
767                 goto out_pool;
768         for_each_possible_cpu(cpu) {
769                 struct list_head *unbuddied =
770                                 per_cpu_ptr(pool->unbuddied, cpu);
771                 for_each_unbuddied_list(i, 0)
772                         INIT_LIST_HEAD(&unbuddied[i]);
773         }
774         INIT_LIST_HEAD(&pool->lru);
775         INIT_LIST_HEAD(&pool->stale);
776         atomic64_set(&pool->pages_nr, 0);
777         pool->name = name;
778         pool->compact_wq = create_singlethread_workqueue(pool->name);
779         if (!pool->compact_wq)
780                 goto out_unbuddied;
781         pool->release_wq = create_singlethread_workqueue(pool->name);
782         if (!pool->release_wq)
783                 goto out_wq;
784         if (z3fold_register_migration(pool))
785                 goto out_rwq;
786         INIT_WORK(&pool->work, free_pages_work);
787         pool->ops = ops;
788         return pool;
789
790 out_rwq:
791         destroy_workqueue(pool->release_wq);
792 out_wq:
793         destroy_workqueue(pool->compact_wq);
794 out_unbuddied:
795         free_percpu(pool->unbuddied);
796 out_pool:
797         kmem_cache_destroy(pool->c_handle);
798 out_c:
799         kfree(pool);
800 out:
801         return NULL;
802 }
803
804 /**
805  * z3fold_destroy_pool() - destroys an existing z3fold pool
806  * @pool:       the z3fold pool to be destroyed
807  *
808  * The pool should be emptied before this function is called.
809  */
810 static void z3fold_destroy_pool(struct z3fold_pool *pool)
811 {
812         kmem_cache_destroy(pool->c_handle);
813         z3fold_unregister_migration(pool);
814         destroy_workqueue(pool->release_wq);
815         destroy_workqueue(pool->compact_wq);
816         kfree(pool);
817 }
818
819 /**
820  * z3fold_alloc() - allocates a region of a given size
821  * @pool:       z3fold pool from which to allocate
822  * @size:       size in bytes of the desired allocation
823  * @gfp:        gfp flags used if the pool needs to grow
824  * @handle:     handle of the new allocation
825  *
826  * This function will attempt to find a free region in the pool large enough to
827  * satisfy the allocation request.  A search of the unbuddied lists is
828  * performed first. If no suitable free region is found, then a new page is
829  * allocated and added to the pool to satisfy the request.
830  *
831  * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
832  * as z3fold pool pages.
833  *
834  * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
835  * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
836  * a new page.
837  */
838 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
839                         unsigned long *handle)
840 {
841         int chunks = size_to_chunks(size);
842         struct z3fold_header *zhdr = NULL;
843         struct page *page = NULL;
844         enum buddy bud;
845         bool can_sleep = gfpflags_allow_blocking(gfp);
846
847         if (!size || (gfp & __GFP_HIGHMEM))
848                 return -EINVAL;
849
850         if (size > PAGE_SIZE)
851                 return -ENOSPC;
852
853         if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
854                 bud = HEADLESS;
855         else {
856 retry:
857                 zhdr = __z3fold_alloc(pool, size, can_sleep);
858                 if (zhdr) {
859                         if (zhdr->first_chunks == 0) {
860                                 if (zhdr->middle_chunks != 0 &&
861                                     chunks >= zhdr->start_middle)
862                                         bud = LAST;
863                                 else
864                                         bud = FIRST;
865                         } else if (zhdr->last_chunks == 0)
866                                 bud = LAST;
867                         else if (zhdr->middle_chunks == 0)
868                                 bud = MIDDLE;
869                         else {
870                                 if (kref_put(&zhdr->refcount,
871                                              release_z3fold_page_locked))
872                                         atomic64_dec(&pool->pages_nr);
873                                 else
874                                         z3fold_page_unlock(zhdr);
875                                 pr_err("No free chunks in unbuddied\n");
876                                 WARN_ON(1);
877                                 goto retry;
878                         }
879                         page = virt_to_page(zhdr);
880                         goto found;
881                 }
882                 bud = FIRST;
883         }
884
885         page = NULL;
886         if (can_sleep) {
887                 spin_lock(&pool->stale_lock);
888                 zhdr = list_first_entry_or_null(&pool->stale,
889                                                 struct z3fold_header, buddy);
890                 /*
891                  * Before allocating a page, let's see if we can take one from
892                  * the stale pages list. cancel_work_sync() can sleep, so we
893                  * limit this case to the contexts where we can sleep.
894                  */
895                 if (zhdr) {
896                         list_del(&zhdr->buddy);
897                         spin_unlock(&pool->stale_lock);
898                         cancel_work_sync(&zhdr->work);
899                         page = virt_to_page(zhdr);
900                 } else {
901                         spin_unlock(&pool->stale_lock);
902                 }
903         }
904         if (!page)
905                 page = alloc_page(gfp);
906
907         if (!page)
908                 return -ENOMEM;
909
910         zhdr = init_z3fold_page(page, pool);
911         if (!zhdr) {
912                 __free_page(page);
913                 return -ENOMEM;
914         }
915         atomic64_inc(&pool->pages_nr);
916
917         if (bud == HEADLESS) {
918                 set_bit(PAGE_HEADLESS, &page->private);
919                 goto headless;
920         }
921         __SetPageMovable(page, pool->inode->i_mapping);
922         z3fold_page_lock(zhdr);
923
924 found:
925         if (bud == FIRST)
926                 zhdr->first_chunks = chunks;
927         else if (bud == LAST)
928                 zhdr->last_chunks = chunks;
929         else {
930                 zhdr->middle_chunks = chunks;
931                 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
932         }
933         add_to_unbuddied(pool, zhdr);
934
935 headless:
936         spin_lock(&pool->lock);
937         /* Add/move z3fold page to beginning of LRU */
938         if (!list_empty(&page->lru))
939                 list_del(&page->lru);
940
941         list_add(&page->lru, &pool->lru);
942
943         *handle = encode_handle(zhdr, bud);
944         spin_unlock(&pool->lock);
945         if (bud != HEADLESS)
946                 z3fold_page_unlock(zhdr);
947
948         return 0;
949 }
950
951 /**
952  * z3fold_free() - frees the allocation associated with the given handle
953  * @pool:       pool in which the allocation resided
954  * @handle:     handle associated with the allocation returned by z3fold_alloc()
955  *
956  * In the case that the z3fold page in which the allocation resides is under
957  * reclaim, as indicated by the PG_reclaim flag being set, this function
958  * only sets the first|last_chunks to 0.  The page is actually freed
959  * once both buddies are evicted (see z3fold_reclaim_page() below).
960  */
961 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
962 {
963         struct z3fold_header *zhdr;
964         struct page *page;
965         enum buddy bud;
966
967         zhdr = handle_to_z3fold_header(handle);
968         page = virt_to_page(zhdr);
969
970         if (test_bit(PAGE_HEADLESS, &page->private)) {
971                 /* If a headless page is under reclaim, just leave.
972                  * NB: we use test_and_set_bit for a reason: if the bit
973                  * has not been set before, we release this page
974                  * immediately, so we don't care about its value any more.
975                  */
976                 if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
977                         spin_lock(&pool->lock);
978                         list_del(&page->lru);
979                         spin_unlock(&pool->lock);
980                         free_z3fold_page(page, true);
981                         atomic64_dec(&pool->pages_nr);
982                 }
983                 return;
984         }
985
986         /* Non-headless case */
987         z3fold_page_lock(zhdr);
988         bud = handle_to_buddy(handle);
989
990         switch (bud) {
991         case FIRST:
992                 zhdr->first_chunks = 0;
993                 break;
994         case MIDDLE:
995                 zhdr->middle_chunks = 0;
996                 break;
997         case LAST:
998                 zhdr->last_chunks = 0;
999                 break;
1000         default:
1001                 pr_err("%s: unknown bud %d\n", __func__, bud);
1002                 WARN_ON(1);
1003                 z3fold_page_unlock(zhdr);
1004                 return;
1005         }
1006
1007         free_handle(handle);
1008         if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
1009                 atomic64_dec(&pool->pages_nr);
1010                 return;
1011         }
1012         if (test_bit(PAGE_CLAIMED, &page->private)) {
1013                 z3fold_page_unlock(zhdr);
1014                 return;
1015         }
1016         if (unlikely(PageIsolated(page)) ||
1017             test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
1018                 z3fold_page_unlock(zhdr);
1019                 return;
1020         }
1021         if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
1022                 spin_lock(&pool->lock);
1023                 list_del_init(&zhdr->buddy);
1024                 spin_unlock(&pool->lock);
1025                 zhdr->cpu = -1;
1026                 kref_get(&zhdr->refcount);
1027                 do_compact_page(zhdr, true);
1028                 return;
1029         }
1030         kref_get(&zhdr->refcount);
1031         queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
1032         z3fold_page_unlock(zhdr);
1033 }
1034
1035 /**
1036  * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1037  * @pool:       pool from which a page will attempt to be evicted
1038  * @retries:    number of pages on the LRU list for which eviction will
1039  *              be attempted before failing
1040  *
1041  * z3fold reclaim is different from normal system reclaim in that it is done
1042  * from the bottom, up. This is because only the bottom layer, z3fold, has
1043  * information on how the allocations are organized within each z3fold page.
1044  * This has the potential to create interesting locking situations between
1045  * z3fold and the user, however.
1046  *
1047  * To avoid these, this is how z3fold_reclaim_page() should be called:
1048  *
1049  * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
1050  * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1051  * call the user-defined eviction handler with the pool and handle as
1052  * arguments.
1053  *
1054  * If the handle can not be evicted, the eviction handler should return
1055  * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
1056  * appropriate list and try the next z3fold page on the LRU up to
1057  * a user defined number of retries.
1058  *
1059  * If the handle is successfully evicted, the eviction handler should
1060  * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
1061  * contains logic to delay freeing the page if the page is under reclaim,
1062  * as indicated by the setting of the PG_reclaim flag on the underlying page.
1063  *
1064  * If all buddies in the z3fold page are successfully evicted, then the
1065  * z3fold page can be freed.
1066  *
1067  * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
1068  * no pages to evict or an eviction handler is not registered, -EAGAIN if
1069  * the retry limit was hit.
1070  */
1071 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
1072 {
1073         int i, ret = 0;
1074         struct z3fold_header *zhdr = NULL;
1075         struct page *page = NULL;
1076         struct list_head *pos;
1077         unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
1078
1079         spin_lock(&pool->lock);
1080         if (!pool->ops || !pool->ops->evict || retries == 0) {
1081                 spin_unlock(&pool->lock);
1082                 return -EINVAL;
1083         }
1084         for (i = 0; i < retries; i++) {
1085                 if (list_empty(&pool->lru)) {
1086                         spin_unlock(&pool->lock);
1087                         return -EINVAL;
1088                 }
1089                 list_for_each_prev(pos, &pool->lru) {
1090                         page = list_entry(pos, struct page, lru);
1091
1092                         /* this bit could have been set by free, in which case
1093                          * we pass over to the next page in the pool.
1094                          */
1095                         if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1096                                 continue;
1097
1098                         if (unlikely(PageIsolated(page)))
1099                                 continue;
1100                         if (test_bit(PAGE_HEADLESS, &page->private))
1101                                 break;
1102
1103                         zhdr = page_address(page);
1104                         if (!z3fold_page_trylock(zhdr)) {
1105                                 zhdr = NULL;
1106                                 continue; /* can't evict at this point */
1107                         }
1108                         kref_get(&zhdr->refcount);
1109                         list_del_init(&zhdr->buddy);
1110                         zhdr->cpu = -1;
1111                         break;
1112                 }
1113
1114                 if (!zhdr)
1115                         break;
1116
1117                 list_del_init(&page->lru);
1118                 spin_unlock(&pool->lock);
1119
1120                 if (!test_bit(PAGE_HEADLESS, &page->private)) {
1121                         /*
1122                          * We need to encode the handles before unlocking, since
1123                          * we can race with free, which will set
1124                          * (first|last)_chunks to 0.
1125                          */
1126                         first_handle = 0;
1127                         last_handle = 0;
1128                         middle_handle = 0;
1129                         if (zhdr->first_chunks)
1130                                 first_handle = encode_handle(zhdr, FIRST);
1131                         if (zhdr->middle_chunks)
1132                                 middle_handle = encode_handle(zhdr, MIDDLE);
1133                         if (zhdr->last_chunks)
1134                                 last_handle = encode_handle(zhdr, LAST);
1135                         /*
1136                          * it's safe to unlock here because we hold a
1137                          * reference to this page
1138                          */
1139                         z3fold_page_unlock(zhdr);
1140                 } else {
1141                         first_handle = encode_handle(zhdr, HEADLESS);
1142                         last_handle = middle_handle = 0;
1143                 }
1144
1145                 /* Issue the eviction callback(s) */
1146                 if (middle_handle) {
1147                         ret = pool->ops->evict(pool, middle_handle);
1148                         if (ret)
1149                                 goto next;
1150                 }
1151                 if (first_handle) {
1152                         ret = pool->ops->evict(pool, first_handle);
1153                         if (ret)
1154                                 goto next;
1155                 }
1156                 if (last_handle) {
1157                         ret = pool->ops->evict(pool, last_handle);
1158                         if (ret)
1159                                 goto next;
1160                 }
1161 next:
1162                 if (test_bit(PAGE_HEADLESS, &page->private)) {
1163                         if (ret == 0) {
1164                                 free_z3fold_page(page, true);
1165                                 atomic64_dec(&pool->pages_nr);
1166                                 return 0;
1167                         }
1168                         spin_lock(&pool->lock);
1169                         list_add(&page->lru, &pool->lru);
1170                         spin_unlock(&pool->lock);
1171                 } else {
1172                         z3fold_page_lock(zhdr);
1173                         clear_bit(PAGE_CLAIMED, &page->private);
1174                         if (kref_put(&zhdr->refcount,
1175                                         release_z3fold_page_locked)) {
1176                                 atomic64_dec(&pool->pages_nr);
1177                                 return 0;
1178                         }
1179                         /*
1180                          * If we are here, the page is still not completely
1181                          * free, so take the global pool lock to be able
1182                          * to add it back to the LRU list.
1183                          */
1184                         spin_lock(&pool->lock);
1185                         list_add(&page->lru, &pool->lru);
1186                         spin_unlock(&pool->lock);
1187                         z3fold_page_unlock(zhdr);
1188                 }
1189
1190                 /* We started off locked, so we need to lock the pool again */
1191                 spin_lock(&pool->lock);
1192         }
1193         spin_unlock(&pool->lock);
1194         return -EAGAIN;
1195 }
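/*
 * Illustrative sketch (not part of the original driver): a minimal eviction
 * handler of the shape z3fold_reclaim_page() expects, assuming the user can
 * write the object back to some backing store.  Per the comment above, a
 * successful handler must free the handle itself and return 0; the helper
 * example_writeback() is hypothetical.
 */
#if 0
static int example_evict(struct z3fold_pool *pool, unsigned long handle)
{
	void *data = z3fold_map(pool, handle);
	int err;

	err = example_writeback(data);	/* hypothetical backing-store write */
	z3fold_unmap(pool, handle);
	if (err)
		return err;	/* non-zero: the page goes back to the LRU */

	z3fold_free(pool, handle);
	return 0;
}
#endif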
1196
1197 /**
1198  * z3fold_map() - maps the allocation associated with the given handle
1199  * @pool:       pool in which the allocation resides
1200  * @handle:     handle associated with the allocation to be mapped
1201  *
1202  * Extracts the buddy number from handle and constructs the pointer to the
1203  * correct starting chunk within the page.
1204  *
1205  * Returns: a pointer to the mapped allocation
1206  */
1207 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1208 {
1209         struct z3fold_header *zhdr;
1210         struct page *page;
1211         void *addr;
1212         enum buddy buddy;
1213
1214         zhdr = handle_to_z3fold_header(handle);
1215         addr = zhdr;
1216         page = virt_to_page(zhdr);
1217
1218         if (test_bit(PAGE_HEADLESS, &page->private))
1219                 goto out;
1220
1221         z3fold_page_lock(zhdr);
1222         buddy = handle_to_buddy(handle);
1223         switch (buddy) {
1224         case FIRST:
1225                 addr += ZHDR_SIZE_ALIGNED;
1226                 break;
1227         case MIDDLE:
1228                 addr += zhdr->start_middle << CHUNK_SHIFT;
1229                 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1230                 break;
1231         case LAST:
1232                 addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
1233                 break;
1234         default:
1235                 pr_err("unknown buddy id %d\n", buddy);
1236                 WARN_ON(1);
1237                 addr = NULL;
1238                 break;
1239         }
1240
1241         if (addr)
1242                 zhdr->mapped_count++;
1243         z3fold_page_unlock(zhdr);
1244 out:
1245         return addr;
1246 }
1247
1248 /**
1249  * z3fold_unmap() - unmaps the allocation associated with the given handle
1250  * @pool:       pool in which the allocation resides
1251  * @handle:     handle associated with the allocation to be unmapped
1252  */
1253 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1254 {
1255         struct z3fold_header *zhdr;
1256         struct page *page;
1257         enum buddy buddy;
1258
1259         zhdr = handle_to_z3fold_header(handle);
1260         page = virt_to_page(zhdr);
1261
1262         if (test_bit(PAGE_HEADLESS, &page->private))
1263                 return;
1264
1265         z3fold_page_lock(zhdr);
1266         buddy = handle_to_buddy(handle);
1267         if (buddy == MIDDLE)
1268                 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1269         zhdr->mapped_count--;
1270         z3fold_page_unlock(zhdr);
1271 }
1272
1273 /**
1274  * z3fold_get_pool_size() - gets the z3fold pool size in pages
1275  * @pool:       pool whose size is being queried
1276  *
1277  * Returns: size in pages of the given pool.
1278  */
1279 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1280 {
1281         return atomic64_read(&pool->pages_nr);
1282 }
1283
1284 static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1285 {
1286         struct z3fold_header *zhdr;
1287         struct z3fold_pool *pool;
1288
1289         VM_BUG_ON_PAGE(!PageMovable(page), page);
1290         VM_BUG_ON_PAGE(PageIsolated(page), page);
1291
1292         if (test_bit(PAGE_HEADLESS, &page->private))
1293                 return false;
1294
1295         zhdr = page_address(page);
1296         z3fold_page_lock(zhdr);
1297         if (test_bit(NEEDS_COMPACTING, &page->private) ||
1298             test_bit(PAGE_STALE, &page->private))
1299                 goto out;
1300
1301         pool = zhdr_to_pool(zhdr);
1302
1303         if (zhdr->mapped_count == 0) {
1304                 kref_get(&zhdr->refcount);
1305                 if (!list_empty(&zhdr->buddy))
1306                         list_del_init(&zhdr->buddy);
1307                 spin_lock(&pool->lock);
1308                 if (!list_empty(&page->lru))
1309                         list_del(&page->lru);
1310                 spin_unlock(&pool->lock);
1311                 z3fold_page_unlock(zhdr);
1312                 return true;
1313         }
1314 out:
1315         z3fold_page_unlock(zhdr);
1316         return false;
1317 }
1318
1319 static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
1320                                struct page *page, enum migrate_mode mode)
1321 {
1322         struct z3fold_header *zhdr, *new_zhdr;
1323         struct z3fold_pool *pool;
1324         struct address_space *new_mapping;
1325
1326         VM_BUG_ON_PAGE(!PageMovable(page), page);
1327         VM_BUG_ON_PAGE(!PageIsolated(page), page);
1328
1329         zhdr = page_address(page);
1330         pool = zhdr_to_pool(zhdr);
1331
1332         if (!trylock_page(page))
1333                 return -EAGAIN;
1334
1335         if (!z3fold_page_trylock(zhdr)) {
1336                 unlock_page(page);
1337                 return -EAGAIN;
1338         }
1339         if (zhdr->mapped_count != 0) {
1340                 z3fold_page_unlock(zhdr);
1341                 unlock_page(page);
1342                 return -EBUSY;
1343         }
1344         new_zhdr = page_address(newpage);
1345         memcpy(new_zhdr, zhdr, PAGE_SIZE);
1346         newpage->private = page->private;
1347         page->private = 0;
1348         z3fold_page_unlock(zhdr);
1349         spin_lock_init(&new_zhdr->page_lock);
1350         new_mapping = page_mapping(page);
1351         __ClearPageMovable(page);
1352         ClearPagePrivate(page);
1353
1354         get_page(newpage);
1355         z3fold_page_lock(new_zhdr);
1356         if (new_zhdr->first_chunks)
1357                 encode_handle(new_zhdr, FIRST);
1358         if (new_zhdr->last_chunks)
1359                 encode_handle(new_zhdr, LAST);
1360         if (new_zhdr->middle_chunks)
1361                 encode_handle(new_zhdr, MIDDLE);
1362         set_bit(NEEDS_COMPACTING, &newpage->private);
1363         new_zhdr->cpu = smp_processor_id();
1364         spin_lock(&pool->lock);
1365         list_add(&newpage->lru, &pool->lru);
1366         spin_unlock(&pool->lock);
1367         __SetPageMovable(newpage, new_mapping);
1368         z3fold_page_unlock(new_zhdr);
1369
1370         queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1371
1372         page_mapcount_reset(page);
1373         unlock_page(page);
1374         put_page(page);
1375         return 0;
1376 }
1377
1378 static void z3fold_page_putback(struct page *page)
1379 {
1380         struct z3fold_header *zhdr;
1381         struct z3fold_pool *pool;
1382
1383         zhdr = page_address(page);
1384         pool = zhdr_to_pool(zhdr);
1385
1386         z3fold_page_lock(zhdr);
1387         if (!list_empty(&zhdr->buddy))
1388                 list_del_init(&zhdr->buddy);
1389         INIT_LIST_HEAD(&page->lru);
1390         if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1391                 atomic64_dec(&pool->pages_nr);
1392                 return;
1393         }
1394         spin_lock(&pool->lock);
1395         list_add(&page->lru, &pool->lru);
1396         spin_unlock(&pool->lock);
1397         z3fold_page_unlock(zhdr);
1398 }
1399
1400 static const struct address_space_operations z3fold_aops = {
1401         .isolate_page = z3fold_page_isolate,
1402         .migratepage = z3fold_page_migrate,
1403         .putback_page = z3fold_page_putback,
1404 };
1405
1406 /*****************
1407  * zpool
1408  ****************/
1409
1410 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
1411 {
1412         if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
1413                 return pool->zpool_ops->evict(pool->zpool, handle);
1414         else
1415                 return -ENOENT;
1416 }
1417
1418 static const struct z3fold_ops z3fold_zpool_ops = {
1419         .evict =        z3fold_zpool_evict
1420 };
1421
1422 static void *z3fold_zpool_create(const char *name, gfp_t gfp,
1423                                const struct zpool_ops *zpool_ops,
1424                                struct zpool *zpool)
1425 {
1426         struct z3fold_pool *pool;
1427
1428         pool = z3fold_create_pool(name, gfp,
1429                                 zpool_ops ? &z3fold_zpool_ops : NULL);
1430         if (pool) {
1431                 pool->zpool = zpool;
1432                 pool->zpool_ops = zpool_ops;
1433         }
1434         return pool;
1435 }
1436
1437 static void z3fold_zpool_destroy(void *pool)
1438 {
1439         z3fold_destroy_pool(pool);
1440 }
1441
1442 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1443                         unsigned long *handle)
1444 {
1445         return z3fold_alloc(pool, size, gfp, handle);
1446 }
1447 static void z3fold_zpool_free(void *pool, unsigned long handle)
1448 {
1449         z3fold_free(pool, handle);
1450 }
1451
1452 static int z3fold_zpool_shrink(void *pool, unsigned int pages,
1453                         unsigned int *reclaimed)
1454 {
1455         unsigned int total = 0;
1456         int ret = -EINVAL;
1457
1458         while (total < pages) {
1459                 ret = z3fold_reclaim_page(pool, 8);
1460                 if (ret < 0)
1461                         break;
1462                 total++;
1463         }
1464
1465         if (reclaimed)
1466                 *reclaimed = total;
1467
1468         return ret;
1469 }
1470
1471 static void *z3fold_zpool_map(void *pool, unsigned long handle,
1472                         enum zpool_mapmode mm)
1473 {
1474         return z3fold_map(pool, handle);
1475 }
1476 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1477 {
1478         z3fold_unmap(pool, handle);
1479 }
1480
1481 static u64 z3fold_zpool_total_size(void *pool)
1482 {
1483         return z3fold_get_pool_size(pool) * PAGE_SIZE;
1484 }
1485
1486 static struct zpool_driver z3fold_zpool_driver = {
1487         .type =         "z3fold",
1488         .owner =        THIS_MODULE,
1489         .create =       z3fold_zpool_create,
1490         .destroy =      z3fold_zpool_destroy,
1491         .malloc =       z3fold_zpool_malloc,
1492         .free =         z3fold_zpool_free,
1493         .shrink =       z3fold_zpool_shrink,
1494         .map =          z3fold_zpool_map,
1495         .unmap =        z3fold_zpool_unmap,
1496         .total_size =   z3fold_zpool_total_size,
1497 };
1498
1499 MODULE_ALIAS("zpool-z3fold");
1500
1501 static int __init init_z3fold(void)
1502 {
1503         int ret;
1504
1505         /* Make sure the z3fold header is not larger than the page size */
1506         BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
1507         ret = z3fold_mount();
1508         if (ret)
1509                 return ret;
1510
1511         zpool_register_driver(&z3fold_zpool_driver);
1512
1513         return 0;
1514 }
1515
1516 static void __exit exit_z3fold(void)
1517 {
1518         z3fold_unmount();
1519         zpool_unregister_driver(&z3fold_zpool_driver);
1520 }
1521
1522 module_init(init_z3fold);
1523 module_exit(exit_z3fold);
1524
1525 MODULE_LICENSE("GPL");
1526 MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
1527 MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");