/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/dcache.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * also be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	(2)
#define SLOTS_ALIGN	(0x40)
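
/*
 * Worked example of the chunk math above, assuming PAGE_SIZE == 4096
 * and PAGE_SHIFT == 12 (a typical 4K-page configuration; other page
 * sizes scale accordingly):
 *
 *	CHUNK_SHIFT  = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *	TOTAL_CHUNKS = 4096 >> 6 = 64 chunks per page
 *	ZHDR_SIZE_ALIGNED rounds the header up to one 64-byte chunk,
 *	so ZHDR_CHUNKS = 1 and NCHUNKS = (4096 - 64) >> 6 = 63
 *	(62 if spinlock debugging grows the header past one chunk).
 */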
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)
/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	int cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};
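
/*
 * In-page layout of a non-HEADLESS z3fold page (a sketch, assuming the
 * header fits in one chunk, i.e. ZHDR_CHUNKS == 1):
 *
 *	chunk 0				z3fold_header
 *	chunks 1 .. first_chunks	FIRST buddy, grows from the header
 *	chunks start_middle ..		MIDDLE buddy, floats and may be
 *					moved by z3fold_compact_page()
 *	last last_chunks chunks		LAST buddy, packed against the
 *					end of the page
 */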
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain
 *		two or fewer buddies; the list each z3fold page is added to
 *		depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver back link
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};
/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};
/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
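
/*
 * E.g. with 64-byte chunks, size_to_chunks(100) = (100 + 63) >> 6 = 2:
 * allocations are rounded up to a whole number of chunks.
 */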
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
static void compact_page_work(struct work_struct *w);
static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
{
	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
							    GFP_KERNEL);

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}
static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
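
/*
 * Illustration of the masking above, with made-up addresses: a slots
 * structure is SLOTS_ALIGN (64-byte) aligned, so a handle pointing at
 * slot[2] of a structure at 0x...f40 has the value 0x...f50, and
 * masking off the low six bits recovers 0x...f40. The pool back link
 * works the same way: the two HANDLE_FLAG_MASK bits carry flags, the
 * rest is the (aligned) pool pointer.
 */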
static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}
static struct dentry *z3fold_do_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "z3fold:", NULL, NULL, 0x33);
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.mount		= z3fold_do_mount,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}
static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}
/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}
/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}
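
/*
 * Example encoding (made-up numbers): for a 5-chunk LAST buddy in a
 * page whose header is at 0x...1000, with first_num == 0, __idx()
 * gives idx = (LAST + 0) & BUDDY_MASK = 3, so slot[3] stores
 * 0x...1000 + 3 + (5 << BUDDY_SHIFT) = 0x...1017, and the handle
 * returned to the user is the address of slot[3] itself. This extra
 * indirection lets the slot contents be re-encoded (e.g. on page
 * migration below) while the user keeps the very same handle value.
 */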
/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return slots_to_pool(zhdr->slots);
}
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}
static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}
static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}
/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
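
/*
 * Example (assuming NCHUNKS == 63 and ZHDR_CHUNKS == 1): for
 * first_chunks == 0, a 20-chunk middle buddy at start_middle == 30 and
 * last_chunks == 5, nfree_before = 30 - 1 = 29 while nfree_after is
 * taken as 0: the only buddy still free is FIRST, which must start
 * right after the header, so a gap between the middle and last buddies
 * does not count. The page is thus filed under unbuddied[29].
 */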
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
	    zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		/* moving middle chunk to the first chunk */
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		/* moving middle chunk to the last chunk */
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
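
/*
 * Example of the BIG_CHUNK_GAP heuristic (assuming ZHDR_CHUNKS == 1):
 * with first_chunks == 10 and start_middle == 20, the gap past the
 * first buddy is 20 - (10 + 1) = 9 chunks >= BIG_CHUNK_GAP, so the
 * middle buddy is slid down to chunk 11, merging the two fragments
 * into one free region after it. A gap of only 2 chunks is left
 * alone: per the comment above, the memmove would cost more than
 * the fragmentation it cures.
 */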
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}
/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}
/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);
	z3fold_unregister_migration(pool);
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	__SetPageMovable(page, pool->inode->i_mapping);
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED bit being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED bit being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			if (unlikely(PageIsolated(page)))
				continue;
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
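
/*
 * A minimal sketch of an eviction handler implementing the protocol
 * documented above (hypothetical user code: write_back_somewhere() is
 * a made-up helper, and real users normally go through the zpool API
 * instead, see z3fold_zpool_evict() below):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *data = z3fold_map(pool, handle);
 *		int err = write_back_somewhere(data);
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return err;
 *		z3fold_free(pool, handle);
 *		return 0;
 *	}
 *
 * Returning non-zero puts the page back on the LRU; returning 0 after
 * calling z3fold_free() lets the reclaim loop release the page once
 * all buddies have been evicted.
 */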
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}
/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}
static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}
static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!trylock_page(page))
		return -EAGAIN;

	if (!z3fold_page_trylock(zhdr)) {
		unlock_page(page);
		return -EAGAIN;
	}
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		unlock_page(page);
		return -EBUSY;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	unlock_page(page);
	put_page(page);
	return 0;
}
static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}
static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};
static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};
static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}
static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}
static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");
static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");