1 /*
2  * zsmalloc memory allocator
3  *
4  * Copyright (C) 2011  Nitin Gupta
5  * Copyright (C) 2012, 2013 Minchan Kim
6  *
7  * This code is released using a dual license strategy: BSD/GPL
8  * You can choose the license that better fits your requirements.
9  *
10  * Released under the terms of 3-clause BSD License
11  * Released under the terms of GNU General Public License Version 2.0
12  */
13
14 /*
15  * Following is how we use various fields and flags of underlying
16  * struct page(s) to form a zspage.
17  *
18  * Usage of struct page fields:
19  *      page->private: points to zspage
20  *      page->index: links together all component pages of a zspage
21  *              For the huge page, this is always 0, so we use this field
22  *              to store the handle.
23  *      page->page_type: PG_zsmalloc, the lower 16 bits locate the first object
24  *              offset in a subpage of a zspage
25  *
26  * Usage of struct page flags:
27  *      PG_private: identifies the first component page
28  *      PG_owner_priv_1: identifies the huge component page
29  *
30  */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 /*
35  * lock ordering:
36  *      page_lock
37  *      pool->migrate_lock
38  *      class->lock
39  *      zspage->lock
40  */
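/*
 * (Respectively: the per-page PG_locked bit lock, the pool-wide rwlock_t
 *  migrate_lock, the per size_class spinlock_t and the per-zspage rwlock_t
 *  used by the migrate_read/write_lock() helpers further down.)
 */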
41
42 #include <linux/module.h>
43 #include <linux/kernel.h>
44 #include <linux/sched.h>
45 #include <linux/bitops.h>
46 #include <linux/errno.h>
47 #include <linux/highmem.h>
48 #include <linux/string.h>
49 #include <linux/slab.h>
50 #include <linux/pgtable.h>
51 #include <asm/tlbflush.h>
52 #include <linux/cpumask.h>
53 #include <linux/cpu.h>
54 #include <linux/vmalloc.h>
55 #include <linux/preempt.h>
56 #include <linux/spinlock.h>
57 #include <linux/sprintf.h>
58 #include <linux/shrinker.h>
59 #include <linux/types.h>
60 #include <linux/debugfs.h>
61 #include <linux/zsmalloc.h>
62 #include <linux/zpool.h>
63 #include <linux/migrate.h>
64 #include <linux/wait.h>
65 #include <linux/pagemap.h>
66 #include <linux/fs.h>
67 #include <linux/local_lock.h>
68
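/*
 * Magic value kept in struct zspage and checked by get_zspage(); it catches
 * the case where page->private does not point at a valid zspage.
 */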
69 #define ZSPAGE_MAGIC    0x58
70
71 /*
72  * This must be a power of 2 and greater than or equal to sizeof(link_free).
73  * These two conditions ensure that any 'struct link_free' itself doesn't
74  * span more than 1 page, which avoids the complex case of mapping 2 pages simply
75  * to restore link_free pointer values.
76  */
77 #define ZS_ALIGN                8
78
79 #define ZS_HANDLE_SIZE (sizeof(unsigned long))
80
81 /*
82  * Object location (<PFN>, <obj_idx>) is encoded as
83  * a single (unsigned long) handle value.
84  *
85  * Note that object index <obj_idx> starts from 0.
86  *
87  * This is made more complicated by various memory models and PAE.
88  */
89
90 #ifndef MAX_POSSIBLE_PHYSMEM_BITS
91 #ifdef MAX_PHYSMEM_BITS
92 #define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS
93 #else
94 /*
95  * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
96  * be PAGE_SHIFT
97  */
98 #define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG
99 #endif
100 #endif
101
102 #define _PFN_BITS               (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
103
104 /*
105  * The header of an allocated object stores OBJ_ALLOCATED_TAG
106  * to identify whether the object is allocated or not.
107  * It's okay to keep the status bit in the least significant bit
108  * because the header keeps a handle, which is a 4-byte-aligned address,
109  * so we have room for at least two bits.
110  */
111 #define OBJ_ALLOCATED_TAG 1
112
113 #define OBJ_TAG_BITS    1
114 #define OBJ_TAG_MASK    OBJ_ALLOCATED_TAG
115
116 #define OBJ_INDEX_BITS  (BITS_PER_LONG - _PFN_BITS)
117 #define OBJ_INDEX_MASK  ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
118
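/*
 * Illustrative example (the values are architecture dependent and only an
 * assumption here): on 64-bit with MAX_POSSIBLE_PHYSMEM_BITS == 46 and
 * PAGE_SHIFT == 12, _PFN_BITS is 34 and OBJ_INDEX_BITS is 30, so an encoded
 * object value is laid out as:
 *
 *      obj = (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK)
 *
 * which is what location_to_obj() computes and obj_to_location() decodes.
 */
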
119 #define HUGE_BITS       1
120 #define FULLNESS_BITS   4
121 #define CLASS_BITS      8
122 #define MAGIC_VAL_BITS  8
123
124 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(CONFIG_ZSMALLOC_CHAIN_SIZE, UL))
125
126 /* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
127 #define ZS_MIN_ALLOC_SIZE \
128         MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
129 /* each chunk includes extra space to keep handle */
130 #define ZS_MAX_ALLOC_SIZE       PAGE_SIZE
131
132 /*
133  * On systems with 4K page size, this gives 255 size classes! There is a
134  * trade-off here:
135  *  - A large number of size classes is potentially wasteful as free pages are
136  *    spread across these classes
137  *  - A small number of size classes causes large internal fragmentation
138  *  - Probably it's better to use specific size classes (empirically
139  *    determined). NOTE: all those class sizes must be set as multiple of
140  *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
141  *
142  *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
143  *  (reason above)
144  */
145 #define ZS_SIZE_CLASS_DELTA     (PAGE_SIZE >> CLASS_BITS)
146 #define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
147                                       ZS_SIZE_CLASS_DELTA) + 1)
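
/*
 * Worked example of the arithmetic above, assuming a 4K PAGE_SIZE and
 * ZS_MIN_ALLOC_SIZE == 32 (other configurations give different numbers):
 *
 *      ZS_SIZE_CLASS_DELTA = 4096 >> 8                        = 16
 *      ZS_SIZE_CLASSES     = DIV_ROUND_UP(4096 - 32, 16) + 1  = 255
 *
 * which matches the "255 size classes" mentioned above.
 */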
148
149 /*
150  * Pages are distinguished by the ratio of used memory (that is the ratio
151  * of ->inuse objects to all objects that page can store). For example,
152  * INUSE_RATIO_10 means that the ratio of used objects is > 0% and <= 10%.
153  *
154  * The number of fullness groups is not random. It allows us to keep
155  * difference between the least busy page in the group (minimum permitted
156  * number of ->inuse objects) and the most busy page (maximum permitted
157  * number of ->inuse objects) at a reasonable value.
158  */
159 enum fullness_group {
160         ZS_INUSE_RATIO_0,
161         ZS_INUSE_RATIO_10,
162         /* NOTE: 8 more fullness groups here */
163         ZS_INUSE_RATIO_99       = 10,
164         ZS_INUSE_RATIO_100,
165         NR_FULLNESS_GROUPS,
166 };
167
168 enum class_stat_type {
169         /* NOTE: stats for 12 fullness groups here: from inuse 0 to 100 */
170         ZS_OBJS_ALLOCATED       = NR_FULLNESS_GROUPS,
171         ZS_OBJS_INUSE,
172         NR_CLASS_STAT_TYPES,
173 };
174
175 struct zs_size_stat {
176         unsigned long objs[NR_CLASS_STAT_TYPES];
177 };
178
179 #ifdef CONFIG_ZSMALLOC_STAT
180 static struct dentry *zs_stat_root;
181 #endif
182
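/*
 * Size (in bytes) of the first "huge" size class: any object of this size or
 * larger is stored in a zspage consisting of a single physical page, see
 * zs_huge_class_size().
 */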
183 static size_t huge_class_size;
184
185 struct size_class {
186         spinlock_t lock;
187         struct list_head fullness_list[NR_FULLNESS_GROUPS];
188         /*
189          * Size of objects stored in this class. Must be multiple
190          * of ZS_ALIGN.
191          */
192         int size;
193         int objs_per_zspage;
194         /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
195         int pages_per_zspage;
196
197         unsigned int index;
198         struct zs_size_stat stats;
199 };
200
201 /*
202  * Placed within free objects to form a singly linked list.
203  * For every zspage, zspage->freeobj gives the head of this list.
204  *
205  * This must be a power of 2 and less than or equal to ZS_ALIGN
206  */
207 struct link_free {
208         union {
209                 /*
210                  * Free object index;
211                  * It's valid for non-allocated object
212                  */
213                 unsigned long next;
214                 /*
215                  * Handle of allocated object.
216                  */
217                 unsigned long handle;
218         };
219 };
220
221 struct zs_pool {
222         const char *name;
223
224         struct size_class *size_class[ZS_SIZE_CLASSES];
225         struct kmem_cache *handle_cachep;
226         struct kmem_cache *zspage_cachep;
227
228         atomic_long_t pages_allocated;
229
230         struct zs_pool_stats stats;
231
232         /* Compact classes */
233         struct shrinker *shrinker;
234
235 #ifdef CONFIG_ZSMALLOC_STAT
236         struct dentry *stat_dentry;
237 #endif
238 #ifdef CONFIG_COMPACTION
239         struct work_struct free_work;
240 #endif
241         /* protect page/zspage migration */
242         rwlock_t migrate_lock;
243         atomic_t compaction_in_progress;
244 };
245
246 struct zspage {
247         struct {
248                 unsigned int huge:HUGE_BITS;
249                 unsigned int fullness:FULLNESS_BITS;
250                 unsigned int class:CLASS_BITS + 1;
251                 unsigned int magic:MAGIC_VAL_BITS;
252         };
253         unsigned int inuse;
254         unsigned int freeobj;
255         struct page *first_page;
256         struct list_head list; /* fullness list */
257         struct zs_pool *pool;
258         rwlock_t lock;
259 };
260
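/*
 * Per-cpu state used by zs_map_object()/zs_unmap_object(): an object that
 * fits within one page is mapped directly with kmap_atomic(), while an
 * object that spans two pages is copied through vm_buf instead (see
 * __zs_map_object() and __zs_unmap_object()).
 */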
261 struct mapping_area {
262         local_lock_t lock;
263         char *vm_buf; /* copy buffer for objects that span pages */
264         char *vm_addr; /* address of kmap_atomic()'ed pages */
265         enum zs_mapmode vm_mm; /* mapping mode */
266 };
267
268 /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
269 static void SetZsHugePage(struct zspage *zspage)
270 {
271         zspage->huge = 1;
272 }
273
274 static bool ZsHugePage(struct zspage *zspage)
275 {
276         return zspage->huge;
277 }
278
279 static void migrate_lock_init(struct zspage *zspage);
280 static void migrate_read_lock(struct zspage *zspage);
281 static void migrate_read_unlock(struct zspage *zspage);
282 static void migrate_write_lock(struct zspage *zspage);
283 static void migrate_write_unlock(struct zspage *zspage);
284
285 #ifdef CONFIG_COMPACTION
286 static void kick_deferred_free(struct zs_pool *pool);
287 static void init_deferred_free(struct zs_pool *pool);
288 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
289 #else
290 static void kick_deferred_free(struct zs_pool *pool) {}
291 static void init_deferred_free(struct zs_pool *pool) {}
292 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
293 #endif
294
295 static int create_cache(struct zs_pool *pool)
296 {
297         char *name;
298
299         name = kasprintf(GFP_KERNEL, "zs_handle-%s", pool->name);
300         if (!name)
301                 return -ENOMEM;
302         pool->handle_cachep = kmem_cache_create(name, ZS_HANDLE_SIZE,
303                                                 0, 0, NULL);
304         kfree(name);
305         if (!pool->handle_cachep)
306                 return -EINVAL;
307
308         name = kasprintf(GFP_KERNEL, "zspage-%s", pool->name);
309         if (!name)
310                 return -ENOMEM;
311         pool->zspage_cachep = kmem_cache_create(name, sizeof(struct zspage),
312                                                 0, 0, NULL);
313         kfree(name);
314         if (!pool->zspage_cachep) {
315                 kmem_cache_destroy(pool->handle_cachep);
316                 pool->handle_cachep = NULL;
317                 return -EINVAL;
318         }
319
320         return 0;
321 }
322
323 static void destroy_cache(struct zs_pool *pool)
324 {
325         kmem_cache_destroy(pool->handle_cachep);
326         kmem_cache_destroy(pool->zspage_cachep);
327 }
328
329 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
330 {
331         return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
332                         gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
333 }
334
335 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
336 {
337         kmem_cache_free(pool->handle_cachep, (void *)handle);
338 }
339
340 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
341 {
342         return kmem_cache_zalloc(pool->zspage_cachep,
343                         flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
344 }
345
346 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
347 {
348         kmem_cache_free(pool->zspage_cachep, zspage);
349 }
350
351 /* class->lock (which owns the handle) synchronizes races */
352 static void record_obj(unsigned long handle, unsigned long obj)
353 {
354         *(unsigned long *)handle = obj;
355 }
356
357 /* zpool driver */
358
359 #ifdef CONFIG_ZPOOL
360
361 static void *zs_zpool_create(const char *name, gfp_t gfp)
362 {
363         /*
364          * Ignore global gfp flags: zs_malloc() may be invoked from
365          * different contexts and its caller must provide a valid
366          * gfp mask.
367          */
368         return zs_create_pool(name);
369 }
370
371 static void zs_zpool_destroy(void *pool)
372 {
373         zs_destroy_pool(pool);
374 }
375
376 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
377                         unsigned long *handle)
378 {
379         *handle = zs_malloc(pool, size, gfp);
380
381         if (IS_ERR_VALUE(*handle))
382                 return PTR_ERR((void *)*handle);
383         return 0;
384 }
385 static void zs_zpool_free(void *pool, unsigned long handle)
386 {
387         zs_free(pool, handle);
388 }
389
390 static void *zs_zpool_map(void *pool, unsigned long handle,
391                         enum zpool_mapmode mm)
392 {
393         enum zs_mapmode zs_mm;
394
395         switch (mm) {
396         case ZPOOL_MM_RO:
397                 zs_mm = ZS_MM_RO;
398                 break;
399         case ZPOOL_MM_WO:
400                 zs_mm = ZS_MM_WO;
401                 break;
402         case ZPOOL_MM_RW:
403         default:
404                 zs_mm = ZS_MM_RW;
405                 break;
406         }
407
408         return zs_map_object(pool, handle, zs_mm);
409 }
410 static void zs_zpool_unmap(void *pool, unsigned long handle)
411 {
412         zs_unmap_object(pool, handle);
413 }
414
415 static u64 zs_zpool_total_pages(void *pool)
416 {
417         return zs_get_total_pages(pool);
418 }
419
420 static struct zpool_driver zs_zpool_driver = {
421         .type =                   "zsmalloc",
422         .owner =                  THIS_MODULE,
423         .create =                 zs_zpool_create,
424         .destroy =                zs_zpool_destroy,
425         .malloc_support_movable = true,
426         .malloc =                 zs_zpool_malloc,
427         .free =                   zs_zpool_free,
428         .map =                    zs_zpool_map,
429         .unmap =                  zs_zpool_unmap,
430         .total_pages =            zs_zpool_total_pages,
431 };
432
433 MODULE_ALIAS("zpool-zsmalloc");
434 #endif /* CONFIG_ZPOOL */
435
436 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
437 static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
438         .lock   = INIT_LOCAL_LOCK(lock),
439 };
440
441 static __maybe_unused int is_first_page(struct page *page)
442 {
443         return PagePrivate(page);
444 }
445
446 /* Protected by class->lock */
447 static inline int get_zspage_inuse(struct zspage *zspage)
448 {
449         return zspage->inuse;
450 }
451
452
453 static inline void mod_zspage_inuse(struct zspage *zspage, int val)
454 {
455         zspage->inuse += val;
456 }
457
458 static inline struct page *get_first_page(struct zspage *zspage)
459 {
460         struct page *first_page = zspage->first_page;
461
462         VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
463         return first_page;
464 }
465
466 #define FIRST_OBJ_PAGE_TYPE_MASK        0xffff
467
468 static inline void reset_first_obj_offset(struct page *page)
469 {
470         VM_WARN_ON_ONCE(!PageZsmalloc(page));
471         page->page_type |= FIRST_OBJ_PAGE_TYPE_MASK;
472 }
473
474 static inline unsigned int get_first_obj_offset(struct page *page)
475 {
476         VM_WARN_ON_ONCE(!PageZsmalloc(page));
477         return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK;
478 }
479
480 static inline void set_first_obj_offset(struct page *page, unsigned int offset)
481 {
482         /* With 16 bit available, we can support offsets into 64 KiB pages. */
483         BUILD_BUG_ON(PAGE_SIZE > SZ_64K);
484         VM_WARN_ON_ONCE(!PageZsmalloc(page));
485         VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK);
486         page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK;
487         page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK;
488 }
489
490 static inline unsigned int get_freeobj(struct zspage *zspage)
491 {
492         return zspage->freeobj;
493 }
494
495 static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
496 {
497         zspage->freeobj = obj;
498 }
499
500 static struct size_class *zspage_class(struct zs_pool *pool,
501                                        struct zspage *zspage)
502 {
503         return pool->size_class[zspage->class];
504 }
505
506 /*
507  * zsmalloc divides the pool into various size classes where each
508  * class maintains a list of zspages where each zspage is divided
509  * into equal sized chunks. Each allocation falls into one of these
510  * classes depending on its size. This function returns index of the
511  * size class which has chunk size big enough to hold the given size.
512  */
513 static int get_size_class_index(int size)
514 {
515         int idx = 0;
516
517         if (likely(size > ZS_MIN_ALLOC_SIZE))
518                 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
519                                 ZS_SIZE_CLASS_DELTA);
520
521         return min_t(int, ZS_SIZE_CLASSES - 1, idx);
522 }
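
/*
 * Example, assuming 4K pages (so ZS_MIN_ALLOC_SIZE == 32 and
 * ZS_SIZE_CLASS_DELTA == 16): a request for 100 bytes maps to index
 * DIV_ROUND_UP(100 - 32, 16) == 5, i.e. the class storing 32 + 5 * 16 == 112
 * byte chunks.
 */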
523
524 static inline void class_stat_add(struct size_class *class, int type,
525                                   unsigned long cnt)
526 {
527         class->stats.objs[type] += cnt;
528 }
529
530 static inline void class_stat_sub(struct size_class *class, int type,
531                                   unsigned long cnt)
532 {
533         class->stats.objs[type] -= cnt;
534 }
535
536 static inline unsigned long class_stat_read(struct size_class *class, int type)
537 {
538         return class->stats.objs[type];
539 }
540
541 #ifdef CONFIG_ZSMALLOC_STAT
542
543 static void __init zs_stat_init(void)
544 {
545         if (!debugfs_initialized()) {
546                 pr_warn("debugfs not available, stat dir not created\n");
547                 return;
548         }
549
550         zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
551 }
552
553 static void __exit zs_stat_exit(void)
554 {
555         debugfs_remove_recursive(zs_stat_root);
556 }
557
558 static unsigned long zs_can_compact(struct size_class *class);
559
560 static int zs_stats_size_show(struct seq_file *s, void *v)
561 {
562         int i, fg;
563         struct zs_pool *pool = s->private;
564         struct size_class *class;
565         int objs_per_zspage;
566         unsigned long obj_allocated, obj_used, pages_used, freeable;
567         unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
568         unsigned long total_freeable = 0;
569         unsigned long inuse_totals[NR_FULLNESS_GROUPS] = {0, };
570
571         seq_printf(s, " %5s %5s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %9s %13s %10s %10s %16s %8s\n",
572                         "class", "size", "10%", "20%", "30%", "40%",
573                         "50%", "60%", "70%", "80%", "90%", "99%", "100%",
574                         "obj_allocated", "obj_used", "pages_used",
575                         "pages_per_zspage", "freeable");
576
577         for (i = 0; i < ZS_SIZE_CLASSES; i++) {
578
579                 class = pool->size_class[i];
580
581                 if (class->index != i)
582                         continue;
583
584                 spin_lock(&class->lock);
585
586                 seq_printf(s, " %5u %5u ", i, class->size);
587                 for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) {
588                         inuse_totals[fg] += class_stat_read(class, fg);
589                         seq_printf(s, "%9lu ", class_stat_read(class, fg));
590                 }
591
592                 obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
593                 obj_used = class_stat_read(class, ZS_OBJS_INUSE);
594                 freeable = zs_can_compact(class);
595                 spin_unlock(&class->lock);
596
597                 objs_per_zspage = class->objs_per_zspage;
598                 pages_used = obj_allocated / objs_per_zspage *
599                                 class->pages_per_zspage;
600
601                 seq_printf(s, "%13lu %10lu %10lu %16d %8lu\n",
602                            obj_allocated, obj_used, pages_used,
603                            class->pages_per_zspage, freeable);
604
605                 total_objs += obj_allocated;
606                 total_used_objs += obj_used;
607                 total_pages += pages_used;
608                 total_freeable += freeable;
609         }
610
611         seq_puts(s, "\n");
612         seq_printf(s, " %5s %5s ", "Total", "");
613
614         for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++)
615                 seq_printf(s, "%9lu ", inuse_totals[fg]);
616
617         seq_printf(s, "%13lu %10lu %10lu %16s %8lu\n",
618                    total_objs, total_used_objs, total_pages, "",
619                    total_freeable);
620
621         return 0;
622 }
623 DEFINE_SHOW_ATTRIBUTE(zs_stats_size);
624
625 static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
626 {
627         if (!zs_stat_root) {
628                 pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
629                 return;
630         }
631
632         pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);
633
634         debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
635                             &zs_stats_size_fops);
636 }
637
638 static void zs_pool_stat_destroy(struct zs_pool *pool)
639 {
640         debugfs_remove_recursive(pool->stat_dentry);
641 }
642
643 #else /* CONFIG_ZSMALLOC_STAT */
644 static void __init zs_stat_init(void)
645 {
646 }
647
648 static void __exit zs_stat_exit(void)
649 {
650 }
651
652 static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
653 {
654 }
655
656 static inline void zs_pool_stat_destroy(struct zs_pool *pool)
657 {
658 }
659 #endif
660
661
662 /*
663  * For each size class, zspages are divided into different groups
664  * depending on their usage ratio. This function returns the fullness
665  * status of the given zspage.
666  */
667 static int get_fullness_group(struct size_class *class, struct zspage *zspage)
668 {
669         int inuse, objs_per_zspage, ratio;
670
671         inuse = get_zspage_inuse(zspage);
672         objs_per_zspage = class->objs_per_zspage;
673
674         if (inuse == 0)
675                 return ZS_INUSE_RATIO_0;
676         if (inuse == objs_per_zspage)
677                 return ZS_INUSE_RATIO_100;
678
679         ratio = 100 * inuse / objs_per_zspage;
680         /*
681          * Take integer division into consideration: a page with one inuse
682          * object out of 127 possible, will end up having 0 usage ratio,
683          * which is wrong as it belongs in ZS_INUSE_RATIO_10 fullness group.
684          */
685         return ratio / 10 + 1;
686 }
687
688 /*
689  * Each size class maintains various freelists and zspages are assigned
690  * to one of these freelists based on the number of live objects they
691  * have. This function inserts the given zspage into the freelist
692  * identified by <class, fullness_group>.
693  */
694 static void insert_zspage(struct size_class *class,
695                                 struct zspage *zspage,
696                                 int fullness)
697 {
698         class_stat_add(class, fullness, 1);
699         list_add(&zspage->list, &class->fullness_list[fullness]);
700         zspage->fullness = fullness;
701 }
702
703 /*
704  * This function removes the given zspage from the freelist identified
705  * by <class, fullness_group>.
706  */
707 static void remove_zspage(struct size_class *class, struct zspage *zspage)
708 {
709         int fullness = zspage->fullness;
710
711         VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
712
713         list_del_init(&zspage->list);
714         class_stat_sub(class, fullness, 1);
715 }
716
717 /*
718  * Each size class maintains zspages in different fullness groups depending
719  * on the number of live objects they contain. When allocating or freeing
720  * objects, the fullness status of the page can change, for instance, from
721  * INUSE_RATIO_80 to INUSE_RATIO_70 when freeing an object. This function
722  * checks if such a status change has occurred for the given page and
723  * accordingly moves the page from the list of the old fullness group to that
724  * of the new fullness group.
725  */
726 static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
727 {
728         int newfg;
729
730         newfg = get_fullness_group(class, zspage);
731         if (newfg == zspage->fullness)
732                 goto out;
733
734         remove_zspage(class, zspage);
735         insert_zspage(class, zspage, newfg);
736 out:
737         return newfg;
738 }
739
740 static struct zspage *get_zspage(struct page *page)
741 {
742         struct zspage *zspage = (struct zspage *)page_private(page);
743
744         BUG_ON(zspage->magic != ZSPAGE_MAGIC);
745         return zspage;
746 }
747
748 static struct page *get_next_page(struct page *page)
749 {
750         struct zspage *zspage = get_zspage(page);
751
752         if (unlikely(ZsHugePage(zspage)))
753                 return NULL;
754
755         return (struct page *)page->index;
756 }
757
758 /**
759  * obj_to_location - get (<page>, <obj_idx>) from encoded object value
760  * @obj: the encoded object value
761  * @page: the page (within the zspage) the object resides in
762  * @obj_idx: object index
763  */
764 static void obj_to_location(unsigned long obj, struct page **page,
765                                 unsigned int *obj_idx)
766 {
767         *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
768         *obj_idx = (obj & OBJ_INDEX_MASK);
769 }
770
771 static void obj_to_page(unsigned long obj, struct page **page)
772 {
773         *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
774 }
775
776 /**
777  * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
778  * @page: the page (within the zspage) the object resides in
779  * @obj_idx: object index
780  */
781 static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
782 {
783         unsigned long obj;
784
785         obj = page_to_pfn(page) << OBJ_INDEX_BITS;
786         obj |= obj_idx & OBJ_INDEX_MASK;
787
788         return obj;
789 }
790
791 static unsigned long handle_to_obj(unsigned long handle)
792 {
793         return *(unsigned long *)handle;
794 }
795
796 static inline bool obj_allocated(struct page *page, void *obj,
797                                  unsigned long *phandle)
798 {
799         unsigned long handle;
800         struct zspage *zspage = get_zspage(page);
801
802         if (unlikely(ZsHugePage(zspage))) {
803                 VM_BUG_ON_PAGE(!is_first_page(page), page);
804                 handle = page->index;
805         } else
806                 handle = *(unsigned long *)obj;
807
808         if (!(handle & OBJ_ALLOCATED_TAG))
809                 return false;
810
811         /* Clear all tags before returning the handle */
812         *phandle = handle & ~OBJ_TAG_MASK;
813         return true;
814 }
815
816 static void reset_page(struct page *page)
817 {
818         __ClearPageMovable(page);
819         ClearPagePrivate(page);
820         set_page_private(page, 0);
821         page->index = 0;
822         reset_first_obj_offset(page);
823         __ClearPageZsmalloc(page);
824 }
825
826 static int trylock_zspage(struct zspage *zspage)
827 {
828         struct page *cursor, *fail;
829
830         for (cursor = get_first_page(zspage); cursor != NULL; cursor =
831                                         get_next_page(cursor)) {
832                 if (!trylock_page(cursor)) {
833                         fail = cursor;
834                         goto unlock;
835                 }
836         }
837
838         return 1;
839 unlock:
840         for (cursor = get_first_page(zspage); cursor != fail; cursor =
841                                         get_next_page(cursor))
842                 unlock_page(cursor);
843
844         return 0;
845 }
846
847 static void __free_zspage(struct zs_pool *pool, struct size_class *class,
848                                 struct zspage *zspage)
849 {
850         struct page *page, *next;
851
852         assert_spin_locked(&class->lock);
853
854         VM_BUG_ON(get_zspage_inuse(zspage));
855         VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
856
857         next = page = get_first_page(zspage);
858         do {
859                 VM_BUG_ON_PAGE(!PageLocked(page), page);
860                 next = get_next_page(page);
861                 reset_page(page);
862                 unlock_page(page);
863                 dec_zone_page_state(page, NR_ZSPAGES);
864                 put_page(page);
865                 page = next;
866         } while (page != NULL);
867
868         cache_free_zspage(pool, zspage);
869
870         class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
871         atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
872 }
873
874 static void free_zspage(struct zs_pool *pool, struct size_class *class,
875                                 struct zspage *zspage)
876 {
877         VM_BUG_ON(get_zspage_inuse(zspage));
878         VM_BUG_ON(list_empty(&zspage->list));
879
880         /*
881          * Since zs_free cannot sleep, this function cannot call
882          * lock_page. The page locks that trylock_zspage took will be
883          * released by __free_zspage.
884          */
885         if (!trylock_zspage(zspage)) {
886                 kick_deferred_free(pool);
887                 return;
888         }
889
890         remove_zspage(class, zspage);
891         __free_zspage(pool, class, zspage);
892 }
893
894 /* Initialize a newly allocated zspage */
895 static void init_zspage(struct size_class *class, struct zspage *zspage)
896 {
897         unsigned int freeobj = 1;
898         unsigned long off = 0;
899         struct page *page = get_first_page(zspage);
900
901         while (page) {
902                 struct page *next_page;
903                 struct link_free *link;
904                 void *vaddr;
905
906                 set_first_obj_offset(page, off);
907
908                 vaddr = kmap_atomic(page);
909                 link = (struct link_free *)vaddr + off / sizeof(*link);
910
911                 while ((off += class->size) < PAGE_SIZE) {
912                         link->next = freeobj++ << OBJ_TAG_BITS;
913                         link += class->size / sizeof(*link);
914                 }
915
916                 /*
917                  * We now come to the last (full or partial) object on this
918                  * page, which must point to the first object on the next
919                  * page (if present)
920                  */
921                 next_page = get_next_page(page);
922                 if (next_page) {
923                         link->next = freeobj++ << OBJ_TAG_BITS;
924                 } else {
925                         /*
926                          * Clear OBJ_TAG_BITS in the last link so we can
927                          * still tell whether the object is allocated or not.
928                          */
929                         link->next = -1UL << OBJ_TAG_BITS;
930                 }
931                 kunmap_atomic(vaddr);
932                 page = next_page;
933                 off %= PAGE_SIZE;
934         }
935
936         set_freeobj(zspage, 0);
937 }
938
939 static void create_page_chain(struct size_class *class, struct zspage *zspage,
940                                 struct page *pages[])
941 {
942         int i;
943         struct page *page;
944         struct page *prev_page = NULL;
945         int nr_pages = class->pages_per_zspage;
946
947         /*
948          * Link the individual pages together as follows:
949          * 1. all pages are linked together using page->index
950          * 2. each sub-page points to the zspage using page->private
951          *
952          * We set PG_private to identify the first page (i.e. no other sub-page
953          * has this flag set).
954          */
955         for (i = 0; i < nr_pages; i++) {
956                 page = pages[i];
957                 set_page_private(page, (unsigned long)zspage);
958                 page->index = 0;
959                 if (i == 0) {
960                         zspage->first_page = page;
961                         SetPagePrivate(page);
962                         if (unlikely(class->objs_per_zspage == 1 &&
963                                         class->pages_per_zspage == 1))
964                                 SetZsHugePage(zspage);
965                 } else {
966                         prev_page->index = (unsigned long)page;
967                 }
968                 prev_page = page;
969         }
970 }
971
972 /*
973  * Allocate a zspage for the given size class
974  */
975 static struct zspage *alloc_zspage(struct zs_pool *pool,
976                                         struct size_class *class,
977                                         gfp_t gfp)
978 {
979         int i;
980         struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
981         struct zspage *zspage = cache_alloc_zspage(pool, gfp);
982
983         if (!zspage)
984                 return NULL;
985
986         zspage->magic = ZSPAGE_MAGIC;
987         migrate_lock_init(zspage);
988
989         for (i = 0; i < class->pages_per_zspage; i++) {
990                 struct page *page;
991
992                 page = alloc_page(gfp);
993                 if (!page) {
994                         while (--i >= 0) {
995                                 dec_zone_page_state(pages[i], NR_ZSPAGES);
996                                 __ClearPageZsmalloc(pages[i]);
997                                 __free_page(pages[i]);
998                         }
999                         cache_free_zspage(pool, zspage);
1000                         return NULL;
1001                 }
1002                 __SetPageZsmalloc(page);
1003
1004                 inc_zone_page_state(page, NR_ZSPAGES);
1005                 pages[i] = page;
1006         }
1007
1008         create_page_chain(class, zspage, pages);
1009         init_zspage(class, zspage);
1010         zspage->pool = pool;
1011         zspage->class = class->index;
1012
1013         return zspage;
1014 }
1015
1016 static struct zspage *find_get_zspage(struct size_class *class)
1017 {
1018         int i;
1019         struct zspage *zspage;
1020
1021         for (i = ZS_INUSE_RATIO_99; i >= ZS_INUSE_RATIO_0; i--) {
1022                 zspage = list_first_entry_or_null(&class->fullness_list[i],
1023                                                   struct zspage, list);
1024                 if (zspage)
1025                         break;
1026         }
1027
1028         return zspage;
1029 }
1030
1031 static inline int __zs_cpu_up(struct mapping_area *area)
1032 {
1033         /*
1034          * Make sure we don't leak memory if a cpu UP notification
1035          * and zs_init() race and both call zs_cpu_up() on the same cpu
1036          */
1037         if (area->vm_buf)
1038                 return 0;
1039         area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
1040         if (!area->vm_buf)
1041                 return -ENOMEM;
1042         return 0;
1043 }
1044
1045 static inline void __zs_cpu_down(struct mapping_area *area)
1046 {
1047         kfree(area->vm_buf);
1048         area->vm_buf = NULL;
1049 }
1050
1051 static void *__zs_map_object(struct mapping_area *area,
1052                         struct page *pages[2], int off, int size)
1053 {
1054         int sizes[2];
1055         void *addr;
1056         char *buf = area->vm_buf;
1057
1058         /* disable page faults to match kmap_atomic() return conditions */
1059         pagefault_disable();
1060
1061         /* no read fastpath */
1062         if (area->vm_mm == ZS_MM_WO)
1063                 goto out;
1064
1065         sizes[0] = PAGE_SIZE - off;
1066         sizes[1] = size - sizes[0];
1067
1068         /* copy object to per-cpu buffer */
1069         addr = kmap_atomic(pages[0]);
1070         memcpy(buf, addr + off, sizes[0]);
1071         kunmap_atomic(addr);
1072         addr = kmap_atomic(pages[1]);
1073         memcpy(buf + sizes[0], addr, sizes[1]);
1074         kunmap_atomic(addr);
1075 out:
1076         return area->vm_buf;
1077 }
1078
1079 static void __zs_unmap_object(struct mapping_area *area,
1080                         struct page *pages[2], int off, int size)
1081 {
1082         int sizes[2];
1083         void *addr;
1084         char *buf;
1085
1086         /* no write fastpath */
1087         if (area->vm_mm == ZS_MM_RO)
1088                 goto out;
1089
1090         buf = area->vm_buf;
1091         buf = buf + ZS_HANDLE_SIZE;
1092         size -= ZS_HANDLE_SIZE;
1093         off += ZS_HANDLE_SIZE;
1094
1095         sizes[0] = PAGE_SIZE - off;
1096         sizes[1] = size - sizes[0];
1097
1098         /* copy per-cpu buffer to object */
1099         addr = kmap_atomic(pages[0]);
1100         memcpy(addr + off, buf, sizes[0]);
1101         kunmap_atomic(addr);
1102         addr = kmap_atomic(pages[1]);
1103         memcpy(addr, buf + sizes[0], sizes[1]);
1104         kunmap_atomic(addr);
1105
1106 out:
1107         /* enable page faults to match kunmap_atomic() return conditions */
1108         pagefault_enable();
1109 }
1110
1111 static int zs_cpu_prepare(unsigned int cpu)
1112 {
1113         struct mapping_area *area;
1114
1115         area = &per_cpu(zs_map_area, cpu);
1116         return __zs_cpu_up(area);
1117 }
1118
1119 static int zs_cpu_dead(unsigned int cpu)
1120 {
1121         struct mapping_area *area;
1122
1123         area = &per_cpu(zs_map_area, cpu);
1124         __zs_cpu_down(area);
1125         return 0;
1126 }
1127
1128 static bool can_merge(struct size_class *prev, int pages_per_zspage,
1129                                         int objs_per_zspage)
1130 {
1131         if (prev->pages_per_zspage == pages_per_zspage &&
1132                 prev->objs_per_zspage == objs_per_zspage)
1133                 return true;
1134
1135         return false;
1136 }
1137
1138 static bool zspage_full(struct size_class *class, struct zspage *zspage)
1139 {
1140         return get_zspage_inuse(zspage) == class->objs_per_zspage;
1141 }
1142
1143 static bool zspage_empty(struct zspage *zspage)
1144 {
1145         return get_zspage_inuse(zspage) == 0;
1146 }
1147
1148 /**
1149  * zs_lookup_class_index() - Returns the index of the zsmalloc &size_class
1150  * that holds objects of the provided size.
1151  * @pool: zsmalloc pool to use
1152  * @size: object size
1153  *
1154  * Context: Any context.
1155  *
1156  * Return: the index of the zsmalloc &size_class that holds objects of the
1157  * provided size.
1158  */
1159 unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
1160 {
1161         struct size_class *class;
1162
1163         class = pool->size_class[get_size_class_index(size)];
1164
1165         return class->index;
1166 }
1167 EXPORT_SYMBOL_GPL(zs_lookup_class_index);
1168
1169 unsigned long zs_get_total_pages(struct zs_pool *pool)
1170 {
1171         return atomic_long_read(&pool->pages_allocated);
1172 }
1173 EXPORT_SYMBOL_GPL(zs_get_total_pages);
1174
1175 /**
1176  * zs_map_object - get address of allocated object from handle.
1177  * @pool: pool from which the object was allocated
1178  * @handle: handle returned from zs_malloc
1179  * @mm: mapping mode to use
1180  *
1181  * Before using an object allocated from zs_malloc, it must be mapped using
1182  * this function. When done with the object, it must be unmapped using
1183  * zs_unmap_object.
1184  *
1185  * Only one object can be mapped per cpu at a time. There is no protection
1186  * against nested mappings.
1187  *
1188  * This function returns with preemption and page faults disabled.
1189  */
1190 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
1191                         enum zs_mapmode mm)
1192 {
1193         struct zspage *zspage;
1194         struct page *page;
1195         unsigned long obj, off;
1196         unsigned int obj_idx;
1197
1198         struct size_class *class;
1199         struct mapping_area *area;
1200         struct page *pages[2];
1201         void *ret;
1202
1203         /*
1204          * Because we use per-cpu mapping areas shared among the
1205          * pools/users, we can't allow mapping in interrupt context
1206          * because it can corrupt another user's mappings.
1207          */
1208         BUG_ON(in_interrupt());
1209
1210         /* pool->migrate_lock guarantees we can get the zspage from the handle safely */
1211         read_lock(&pool->migrate_lock);
1212         obj = handle_to_obj(handle);
1213         obj_to_location(obj, &page, &obj_idx);
1214         zspage = get_zspage(page);
1215
1216         /*
1217          * While this lock is held, migration cannot move any objects in this
1218          * zspage. Here, class->lock is too heavy since callers may take some
1219          * time until they call the zs_unmap_object API, so delegate the
1220          * locking from the class to the zspage, which has a smaller granularity.
1221          */
1222         migrate_read_lock(zspage);
1223         read_unlock(&pool->migrate_lock);
1224
1225         class = zspage_class(pool, zspage);
1226         off = offset_in_page(class->size * obj_idx);
1227
1228         local_lock(&zs_map_area.lock);
1229         area = this_cpu_ptr(&zs_map_area);
1230         area->vm_mm = mm;
1231         if (off + class->size <= PAGE_SIZE) {
1232                 /* this object is contained entirely within a page */
1233                 area->vm_addr = kmap_atomic(page);
1234                 ret = area->vm_addr + off;
1235                 goto out;
1236         }
1237
1238         /* this object spans two pages */
1239         pages[0] = page;
1240         pages[1] = get_next_page(page);
1241         BUG_ON(!pages[1]);
1242
1243         ret = __zs_map_object(area, pages, off, class->size);
1244 out:
1245         if (likely(!ZsHugePage(zspage)))
1246                 ret += ZS_HANDLE_SIZE;
1247
1248         return ret;
1249 }
1250 EXPORT_SYMBOL_GPL(zs_map_object);
1251
1252 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
1253 {
1254         struct zspage *zspage;
1255         struct page *page;
1256         unsigned long obj, off;
1257         unsigned int obj_idx;
1258
1259         struct size_class *class;
1260         struct mapping_area *area;
1261
1262         obj = handle_to_obj(handle);
1263         obj_to_location(obj, &page, &obj_idx);
1264         zspage = get_zspage(page);
1265         class = zspage_class(pool, zspage);
1266         off = offset_in_page(class->size * obj_idx);
1267
1268         area = this_cpu_ptr(&zs_map_area);
1269         if (off + class->size <= PAGE_SIZE)
1270                 kunmap_atomic(area->vm_addr);
1271         else {
1272                 struct page *pages[2];
1273
1274                 pages[0] = page;
1275                 pages[1] = get_next_page(page);
1276                 BUG_ON(!pages[1]);
1277
1278                 __zs_unmap_object(area, pages, off, class->size);
1279         }
1280         local_unlock(&zs_map_area.lock);
1281
1282         migrate_read_unlock(zspage);
1283 }
1284 EXPORT_SYMBOL_GPL(zs_unmap_object);
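
/*
 * Illustrative (hypothetical) caller of the map/unmap API above; 'pool',
 * 'src', 'dst' and 'len' are assumptions, not part of this file:
 *
 *      unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);
 *
 *      if (IS_ERR_VALUE(handle))
 *              return -ENOMEM;
 *      dst = zs_map_object(pool, handle, ZS_MM_WO);
 *      memcpy(dst, src, len);
 *      zs_unmap_object(pool, handle);
 */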
1285
1286 /**
1287  * zs_huge_class_size() - Returns the size (in bytes) of the first huge
1288  *                        zsmalloc &size_class.
1289  * @pool: zsmalloc pool to use
1290  *
1291  * The function returns the size of the first huge class - any object of equal
1292  * or bigger size will be stored in a zspage consisting of a single physical
1293  * page.
1294  *
1295  * Context: Any context.
1296  *
1297  * Return: the size (in bytes) of the first huge zsmalloc &size_class.
1298  */
1299 size_t zs_huge_class_size(struct zs_pool *pool)
1300 {
1301         return huge_class_size;
1302 }
1303 EXPORT_SYMBOL_GPL(zs_huge_class_size);
1304
1305 static unsigned long obj_malloc(struct zs_pool *pool,
1306                                 struct zspage *zspage, unsigned long handle)
1307 {
1308         int i, nr_page, offset;
1309         unsigned long obj;
1310         struct link_free *link;
1311         struct size_class *class;
1312
1313         struct page *m_page;
1314         unsigned long m_offset;
1315         void *vaddr;
1316
1317         class = pool->size_class[zspage->class];
1318         obj = get_freeobj(zspage);
1319
1320         offset = obj * class->size;
1321         nr_page = offset >> PAGE_SHIFT;
1322         m_offset = offset_in_page(offset);
1323         m_page = get_first_page(zspage);
1324
1325         for (i = 0; i < nr_page; i++)
1326                 m_page = get_next_page(m_page);
1327
1328         vaddr = kmap_atomic(m_page);
1329         link = (struct link_free *)vaddr + m_offset / sizeof(*link);
1330         set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
1331         if (likely(!ZsHugePage(zspage)))
1332                 /* record handle in the header of allocated chunk */
1333                 link->handle = handle | OBJ_ALLOCATED_TAG;
1334         else
1335                 /* record handle to page->index */
1336                 zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
1337
1338         kunmap_atomic(vaddr);
1339         mod_zspage_inuse(zspage, 1);
1340
1341         obj = location_to_obj(m_page, obj);
1342         record_obj(handle, obj);
1343
1344         return obj;
1345 }
1346
1347
1348 /**
1349  * zs_malloc - Allocate block of given size from pool.
1350  * @pool: pool to allocate from
1351  * @size: size of block to allocate
1352  * @gfp: gfp flags when allocating object
1353  *
1354  * On success, handle to the allocated object is returned,
1355  * otherwise an ERR_PTR().
1356  * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
1357  */
1358 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
1359 {
1360         unsigned long handle;
1361         struct size_class *class;
1362         int newfg;
1363         struct zspage *zspage;
1364
1365         if (unlikely(!size))
1366                 return (unsigned long)ERR_PTR(-EINVAL);
1367
1368         if (unlikely(size > ZS_MAX_ALLOC_SIZE))
1369                 return (unsigned long)ERR_PTR(-ENOSPC);
1370
1371         handle = cache_alloc_handle(pool, gfp);
1372         if (!handle)
1373                 return (unsigned long)ERR_PTR(-ENOMEM);
1374
1375         /* extra space in chunk to keep the handle */
1376         size += ZS_HANDLE_SIZE;
1377         class = pool->size_class[get_size_class_index(size)];
1378
1379         /* class->lock effectively protects the zspage migration */
1380         spin_lock(&class->lock);
1381         zspage = find_get_zspage(class);
1382         if (likely(zspage)) {
1383                 obj_malloc(pool, zspage, handle);
1384                 /* Now move the zspage to another fullness group, if required */
1385                 fix_fullness_group(class, zspage);
1386                 class_stat_add(class, ZS_OBJS_INUSE, 1);
1387
1388                 goto out;
1389         }
1390
1391         spin_unlock(&class->lock);
1392
1393         zspage = alloc_zspage(pool, class, gfp);
1394         if (!zspage) {
1395                 cache_free_handle(pool, handle);
1396                 return (unsigned long)ERR_PTR(-ENOMEM);
1397         }
1398
1399         spin_lock(&class->lock);
1400         obj_malloc(pool, zspage, handle);
1401         newfg = get_fullness_group(class, zspage);
1402         insert_zspage(class, zspage, newfg);
1403         atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
1404         class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
1405         class_stat_add(class, ZS_OBJS_INUSE, 1);
1406
1407         /* We have completely set up the zspage, so mark it as movable */
1408         SetZsPageMovable(pool, zspage);
1409 out:
1410         spin_unlock(&class->lock);
1411
1412         return handle;
1413 }
1414 EXPORT_SYMBOL_GPL(zs_malloc);
1415
1416 static void obj_free(int class_size, unsigned long obj)
1417 {
1418         struct link_free *link;
1419         struct zspage *zspage;
1420         struct page *f_page;
1421         unsigned long f_offset;
1422         unsigned int f_objidx;
1423         void *vaddr;
1424
1425         obj_to_location(obj, &f_page, &f_objidx);
1426         f_offset = offset_in_page(class_size * f_objidx);
1427         zspage = get_zspage(f_page);
1428
1429         vaddr = kmap_atomic(f_page);
1430         link = (struct link_free *)(vaddr + f_offset);
1431
1432         /* Insert this object in containing zspage's freelist */
1433         if (likely(!ZsHugePage(zspage)))
1434                 link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
1435         else
1436                 f_page->index = 0;
1437         set_freeobj(zspage, f_objidx);
1438
1439         kunmap_atomic(vaddr);
1440         mod_zspage_inuse(zspage, -1);
1441 }
1442
1443 void zs_free(struct zs_pool *pool, unsigned long handle)
1444 {
1445         struct zspage *zspage;
1446         struct page *f_page;
1447         unsigned long obj;
1448         struct size_class *class;
1449         int fullness;
1450
1451         if (IS_ERR_OR_NULL((void *)handle))
1452                 return;
1453
1454         /*
1455          * The pool->migrate_lock protects against races with zspage migration,
1456          * so it's safe to get the page from the handle.
1457          */
1458         read_lock(&pool->migrate_lock);
1459         obj = handle_to_obj(handle);
1460         obj_to_page(obj, &f_page);
1461         zspage = get_zspage(f_page);
1462         class = zspage_class(pool, zspage);
1463         spin_lock(&class->lock);
1464         read_unlock(&pool->migrate_lock);
1465
1466         class_stat_sub(class, ZS_OBJS_INUSE, 1);
1467         obj_free(class->size, obj);
1468
1469         fullness = fix_fullness_group(class, zspage);
1470         if (fullness == ZS_INUSE_RATIO_0)
1471                 free_zspage(pool, class, zspage);
1472
1473         spin_unlock(&class->lock);
1474         cache_free_handle(pool, handle);
1475 }
1476 EXPORT_SYMBOL_GPL(zs_free);
1477
1478 static void zs_object_copy(struct size_class *class, unsigned long dst,
1479                                 unsigned long src)
1480 {
1481         struct page *s_page, *d_page;
1482         unsigned int s_objidx, d_objidx;
1483         unsigned long s_off, d_off;
1484         void *s_addr, *d_addr;
1485         int s_size, d_size, size;
1486         int written = 0;
1487
1488         s_size = d_size = class->size;
1489
1490         obj_to_location(src, &s_page, &s_objidx);
1491         obj_to_location(dst, &d_page, &d_objidx);
1492
1493         s_off = offset_in_page(class->size * s_objidx);
1494         d_off = offset_in_page(class->size * d_objidx);
1495
1496         if (s_off + class->size > PAGE_SIZE)
1497                 s_size = PAGE_SIZE - s_off;
1498
1499         if (d_off + class->size > PAGE_SIZE)
1500                 d_size = PAGE_SIZE - d_off;
1501
1502         s_addr = kmap_atomic(s_page);
1503         d_addr = kmap_atomic(d_page);
1504
1505         while (1) {
1506                 size = min(s_size, d_size);
1507                 memcpy(d_addr + d_off, s_addr + s_off, size);
1508                 written += size;
1509
1510                 if (written == class->size)
1511                         break;
1512
1513                 s_off += size;
1514                 s_size -= size;
1515                 d_off += size;
1516                 d_size -= size;
1517
1518                 /*
1519                  * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic()
1520                  * calls must occur in reverse order of calls to kmap_atomic().
1521                  * So, to call kunmap_atomic(s_addr) we should first call
1522                  * kunmap_atomic(d_addr). For more details see
1523                  * Documentation/mm/highmem.rst.
1524                  */
1525                 if (s_off >= PAGE_SIZE) {
1526                         kunmap_atomic(d_addr);
1527                         kunmap_atomic(s_addr);
1528                         s_page = get_next_page(s_page);
1529                         s_addr = kmap_atomic(s_page);
1530                         d_addr = kmap_atomic(d_page);
1531                         s_size = class->size - written;
1532                         s_off = 0;
1533                 }
1534
1535                 if (d_off >= PAGE_SIZE) {
1536                         kunmap_atomic(d_addr);
1537                         d_page = get_next_page(d_page);
1538                         d_addr = kmap_atomic(d_page);
1539                         d_size = class->size - written;
1540                         d_off = 0;
1541                 }
1542         }
1543
1544         kunmap_atomic(d_addr);
1545         kunmap_atomic(s_addr);
1546 }
1547
1548 /*
1549  * Find an allocated object in the zspage, starting the search from the
1550  * given object index, and return its handle.
1551  */
1552 static unsigned long find_alloced_obj(struct size_class *class,
1553                                       struct page *page, int *obj_idx)
1554 {
1555         unsigned int offset;
1556         int index = *obj_idx;
1557         unsigned long handle = 0;
1558         void *addr = kmap_atomic(page);
1559
1560         offset = get_first_obj_offset(page);
1561         offset += class->size * index;
1562
1563         while (offset < PAGE_SIZE) {
1564                 if (obj_allocated(page, addr + offset, &handle))
1565                         break;
1566
1567                 offset += class->size;
1568                 index++;
1569         }
1570
1571         kunmap_atomic(addr);
1572
1573         *obj_idx = index;
1574
1575         return handle;
1576 }
1577
1578 static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
1579                            struct zspage *dst_zspage)
1580 {
1581         unsigned long used_obj, free_obj;
1582         unsigned long handle;
1583         int obj_idx = 0;
1584         struct page *s_page = get_first_page(src_zspage);
1585         struct size_class *class = pool->size_class[src_zspage->class];
1586
1587         while (1) {
1588                 handle = find_alloced_obj(class, s_page, &obj_idx);
1589                 if (!handle) {
1590                         s_page = get_next_page(s_page);
1591                         if (!s_page)
1592                                 break;
1593                         obj_idx = 0;
1594                         continue;
1595                 }
1596
1597                 used_obj = handle_to_obj(handle);
1598                 free_obj = obj_malloc(pool, dst_zspage, handle);
1599                 zs_object_copy(class, free_obj, used_obj);
1600                 obj_idx++;
1601                 obj_free(class->size, used_obj);
1602
1603                 /* Stop if there is no more space */
1604                 if (zspage_full(class, dst_zspage))
1605                         break;
1606
1607                 /* Stop if there are no more objects to migrate */
1608                 if (zspage_empty(src_zspage))
1609                         break;
1610         }
1611 }
1612
1613 static struct zspage *isolate_src_zspage(struct size_class *class)
1614 {
1615         struct zspage *zspage;
1616         int fg;
1617
1618         for (fg = ZS_INUSE_RATIO_10; fg <= ZS_INUSE_RATIO_99; fg++) {
1619                 zspage = list_first_entry_or_null(&class->fullness_list[fg],
1620                                                   struct zspage, list);
1621                 if (zspage) {
1622                         remove_zspage(class, zspage);
1623                         return zspage;
1624                 }
1625         }
1626
1627         return zspage;
1628 }
1629
1630 static struct zspage *isolate_dst_zspage(struct size_class *class)
1631 {
1632         struct zspage *zspage;
1633         int fg;
1634
1635         for (fg = ZS_INUSE_RATIO_99; fg >= ZS_INUSE_RATIO_10; fg--) {
1636                 zspage = list_first_entry_or_null(&class->fullness_list[fg],
1637                                                   struct zspage, list);
1638                 if (zspage) {
1639                         remove_zspage(class, zspage);
1640                         return zspage;
1641                 }
1642         }
1643
1644         return zspage;
1645 }
1646
1647 /*
1648  * putback_zspage - add @zspage into the right class's fullness list
1649  * @class: destination class
1650  * @zspage: target page
1651  *
1652  * Return @zspage's fullness status
1653  */
1654 static int putback_zspage(struct size_class *class, struct zspage *zspage)
1655 {
1656         int fullness;
1657
1658         fullness = get_fullness_group(class, zspage);
1659         insert_zspage(class, zspage, fullness);
1660
1661         return fullness;
1662 }
1663
1664 #ifdef CONFIG_COMPACTION
1665 /*
1666  * To prevent the zspage from being destroyed during migration, zspage
1667  * freeing must hold the locks of all pages in the zspage.
1668  */
1669 static void lock_zspage(struct zspage *zspage)
1670 {
1671         struct page *curr_page, *page;
1672
1673         /*
1674          * Pages we haven't locked yet can be migrated off the list while we're
1675          * trying to lock them, so we need to be careful and only attempt to
1676          * lock each page under migrate_read_lock(). Otherwise, the page we lock
1677          * may no longer belong to the zspage. This means that we may wait for
1678          * the wrong page to unlock, so we must take a reference to the page
1679          * prior to waiting for it to unlock outside migrate_read_lock().
1680          */
1681         while (1) {
1682                 migrate_read_lock(zspage);
1683                 page = get_first_page(zspage);
1684                 if (trylock_page(page))
1685                         break;
1686                 get_page(page);
1687                 migrate_read_unlock(zspage);
1688                 wait_on_page_locked(page);
1689                 put_page(page);
1690         }
1691
1692         curr_page = page;
1693         while ((page = get_next_page(curr_page))) {
1694                 if (trylock_page(page)) {
1695                         curr_page = page;
1696                 } else {
1697                         get_page(page);
1698                         migrate_read_unlock(zspage);
1699                         wait_on_page_locked(page);
1700                         put_page(page);
1701                         migrate_read_lock(zspage);
1702                 }
1703         }
1704         migrate_read_unlock(zspage);
1705 }
1706 #endif /* CONFIG_COMPACTION */
1707
1708 static void migrate_lock_init(struct zspage *zspage)
1709 {
1710         rwlock_init(&zspage->lock);
1711 }
1712
1713 static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
1714 {
1715         read_lock(&zspage->lock);
1716 }
1717
1718 static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
1719 {
1720         read_unlock(&zspage->lock);
1721 }
1722
1723 static void migrate_write_lock(struct zspage *zspage)
1724 {
1725         write_lock(&zspage->lock);
1726 }
1727
1728 static void migrate_write_unlock(struct zspage *zspage)
1729 {
1730         write_unlock(&zspage->lock);
1731 }
1732
1733 #ifdef CONFIG_COMPACTION
1734
1735 static const struct movable_operations zsmalloc_mops;
1736
1737 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1738                                 struct page *newpage, struct page *oldpage)
1739 {
1740         struct page *page;
1741         struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
1742         int idx = 0;
1743
1744         page = get_first_page(zspage);
1745         do {
1746                 if (page == oldpage)
1747                         pages[idx] = newpage;
1748                 else
1749                         pages[idx] = page;
1750                 idx++;
1751         } while ((page = get_next_page(page)) != NULL);
1752
1753         create_page_chain(class, zspage, pages);
1754         set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
1755         if (unlikely(ZsHugePage(zspage)))
1756                 newpage->index = oldpage->index;
1757         __SetPageMovable(newpage, &zsmalloc_mops);
1758 }
1759
1760 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
1761 {
1762         /*
1763          * The page is locked, so the zspage cannot be destroyed. For details,
1764          * see lock_zspage() in free_zspage.
1765          */
1766         VM_BUG_ON_PAGE(PageIsolated(page), page);
1767
1768         return true;
1769 }
1770
1771 static int zs_page_migrate(struct page *newpage, struct page *page,
1772                 enum migrate_mode mode)
1773 {
1774         struct zs_pool *pool;
1775         struct size_class *class;
1776         struct zspage *zspage;
1777         struct page *dummy;
1778         void *s_addr, *d_addr, *addr;
1779         unsigned int offset;
1780         unsigned long handle;
1781         unsigned long old_obj, new_obj;
1782         unsigned int obj_idx;
1783
1784         VM_BUG_ON_PAGE(!PageIsolated(page), page);
1785
1786         /* We're committed, tell the world that this is a Zsmalloc page. */
1787         __SetPageZsmalloc(newpage);
1788
1789         /* The page is locked, so this pointer must remain valid */
1790         zspage = get_zspage(page);
1791         pool = zspage->pool;
1792
1793         /*
1794          * The pool migrate_lock protects against the race between zpage
1795          * migration and zs_free().
1796          */
1797         write_lock(&pool->migrate_lock);
1798         class = zspage_class(pool, zspage);
1799
1800         /*
1801          * the class lock protects zpage alloc/free in the zspage.
1802          */
1803         spin_lock(&class->lock);
1804         /* the migrate_write_lock protects zpage access via zs_map_object */
1805         migrate_write_lock(zspage);
1806
1807         offset = get_first_obj_offset(page);
1808         s_addr = kmap_atomic(page);
1809
1810         /*
1811          * No user can access any object in the zspage at this point, so it is safe to move.
1812          */
1813         d_addr = kmap_atomic(newpage);
1814         copy_page(d_addr, s_addr);
1815         kunmap_atomic(d_addr);
1816
1817         for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
1818                                         addr += class->size) {
1819                 if (obj_allocated(page, addr, &handle)) {
1820
1821                         old_obj = handle_to_obj(handle);
1822                         obj_to_location(old_obj, &dummy, &obj_idx);
1823                         new_obj = (unsigned long)location_to_obj(newpage,
1824                                                                 obj_idx);
1825                         record_obj(handle, new_obj);
1826                 }
1827         }
1828         kunmap_atomic(s_addr);
1829
1830         replace_sub_page(class, zspage, newpage, page);
1831         /*
1832          * Since the data copy is complete and the new zspage structure is
1833          * set up, it's okay to release the pool's migrate_lock.
1834          */
1835         write_unlock(&pool->migrate_lock);
1836         spin_unlock(&class->lock);
1837         migrate_write_unlock(zspage);
1838
1839         get_page(newpage);
1840         if (page_zone(newpage) != page_zone(page)) {
1841                 dec_zone_page_state(page, NR_ZSPAGES);
1842                 inc_zone_page_state(newpage, NR_ZSPAGES);
1843         }
1844
1845         reset_page(page);
1846         put_page(page);
1847
1848         return MIGRATEPAGE_SUCCESS;
1849 }
1850
1851 static void zs_page_putback(struct page *page)
1852 {
1853         VM_BUG_ON_PAGE(!PageIsolated(page), page);
1854 }
1855
1856 static const struct movable_operations zsmalloc_mops = {
1857         .isolate_page = zs_page_isolate,
1858         .migrate_page = zs_page_migrate,
1859         .putback_page = zs_page_putback,
1860 };
1861
1862 /*
1863  * The caller should hold the page_lock of all pages in the zspage.
1864  * At this point we cannot use zspage metadata.
1865  */
1866 static void async_free_zspage(struct work_struct *work)
1867 {
1868         int i;
1869         struct size_class *class;
1870         struct zspage *zspage, *tmp;
1871         LIST_HEAD(free_pages);
1872         struct zs_pool *pool = container_of(work, struct zs_pool,
1873                                         free_work);
1874
1875         for (i = 0; i < ZS_SIZE_CLASSES; i++) {
1876                 class = pool->size_class[i];
1877                 if (class->index != i)
1878                         continue;
1879
1880                 spin_lock(&class->lock);
1881                 list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0],
1882                                  &free_pages);
1883                 spin_unlock(&class->lock);
1884         }
1885
1886         list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
1887                 list_del(&zspage->list);
1888                 lock_zspage(zspage);
1889
1890                 class = zspage_class(pool, zspage);
1891                 spin_lock(&class->lock);
1892                 class_stat_sub(class, ZS_INUSE_RATIO_0, 1);
1893                 __free_zspage(pool, class, zspage);
1894                 spin_unlock(&class->lock);
1895         }
1896 }
1897
1898 static void kick_deferred_free(struct zs_pool *pool)
1899 {
1900         schedule_work(&pool->free_work);
1901 }
1902
1903 static void zs_flush_migration(struct zs_pool *pool)
1904 {
1905         flush_work(&pool->free_work);
1906 }
1907
1908 static void init_deferred_free(struct zs_pool *pool)
1909 {
1910         INIT_WORK(&pool->free_work, async_free_zspage);
1911 }
1912
1913 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
1914 {
1915         struct page *page = get_first_page(zspage);
1916
1917         do {
1918                 WARN_ON(!trylock_page(page));
1919                 __SetPageMovable(page, &zsmalloc_mops);
1920                 unlock_page(page);
1921         } while ((page = get_next_page(page)) != NULL);
1922 }
1923 #else
1924 static inline void zs_flush_migration(struct zs_pool *pool) { }
1925 #endif
1926
1927 /*
1928  *
1929  * Based on the number of unused allocated objects, calculate
1930  * and return the number of pages that we can free.
1931  */
1932 static unsigned long zs_can_compact(struct size_class *class)
1933 {
1934         unsigned long obj_wasted;
1935         unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
1936         unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE);
1937
1938         if (obj_allocated <= obj_used)
1939                 return 0;
1940
1941         obj_wasted = obj_allocated - obj_used;
1942         obj_wasted /= class->objs_per_zspage;
1943
1944         return obj_wasted * class->pages_per_zspage;
1945 }
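
/*
 * Worked example with hypothetical class parameters: for a class with
 * objs_per_zspage == 112 and pages_per_zspage == 2, obj_allocated == 1000
 * and obj_used == 400 give obj_wasted = (1000 - 400) / 112 == 5 zspages
 * worth of unused objects, so compaction can free at most 5 * 2 == 10
 * pages from this class.
 */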
1946
1947 static unsigned long __zs_compact(struct zs_pool *pool,
1948                                   struct size_class *class)
1949 {
1950         struct zspage *src_zspage = NULL;
1951         struct zspage *dst_zspage = NULL;
1952         unsigned long pages_freed = 0;
1953
1954         /*
1955          * Protect against the race between zpage migration and zs_free(),
1956          * as well as zpage allocation/free.
1957          */
1958         write_lock(&pool->migrate_lock);
1959         spin_lock(&class->lock);
1960         while (zs_can_compact(class)) {
1961                 int fg;
1962
1963                 if (!dst_zspage) {
1964                         dst_zspage = isolate_dst_zspage(class);
1965                         if (!dst_zspage)
1966                                 break;
1967                 }
1968
1969                 src_zspage = isolate_src_zspage(class);
1970                 if (!src_zspage)
1971                         break;
1972
1973                 migrate_write_lock(src_zspage);
1974                 migrate_zspage(pool, src_zspage, dst_zspage);
1975                 migrate_write_unlock(src_zspage);
1976
1977                 fg = putback_zspage(class, src_zspage);
1978                 if (fg == ZS_INUSE_RATIO_0) {
1979                         free_zspage(pool, class, src_zspage);
1980                         pages_freed += class->pages_per_zspage;
1981                 }
1982                 src_zspage = NULL;
1983
1984                 if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
1985                     || rwlock_is_contended(&pool->migrate_lock)) {
1986                         putback_zspage(class, dst_zspage);
1987                         dst_zspage = NULL;
1988
1989                         spin_unlock(&class->lock);
1990                         write_unlock(&pool->migrate_lock);
1991                         cond_resched();
1992                         write_lock(&pool->migrate_lock);
1993                         spin_lock(&class->lock);
1994                 }
1995         }
1996
1997         if (src_zspage)
1998                 putback_zspage(class, src_zspage);
1999
2000         if (dst_zspage)
2001                 putback_zspage(class, dst_zspage);
2002
2003         spin_unlock(&class->lock);
2004         write_unlock(&pool->migrate_lock);
2005
2006         return pages_freed;
2007 }
2008
2009 unsigned long zs_compact(struct zs_pool *pool)
2010 {
2011         int i;
2012         struct size_class *class;
2013         unsigned long pages_freed = 0;
2014
2015         /*
2016          * Pool compaction is performed under pool->migrate_lock so it is basically
2017          * single-threaded. Having more than one thread in __zs_compact()
2018          * will increase pool->migrate_lock contention, which will impact other
2019          * zsmalloc operations that need pool->migrate_lock.
2020          */
2021         if (atomic_xchg(&pool->compaction_in_progress, 1))
2022                 return 0;
2023
2024         for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2025                 class = pool->size_class[i];
2026                 if (class->index != i)
2027                         continue;
2028                 pages_freed += __zs_compact(pool, class);
2029         }
2030         atomic_long_add(pages_freed, &pool->stats.pages_compacted);
2031         atomic_set(&pool->compaction_in_progress, 0);
2032
2033         return pages_freed;
2034 }
2035 EXPORT_SYMBOL_GPL(zs_compact);
2036
2037 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
2038 {
2039         memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
2040 }
2041 EXPORT_SYMBOL_GPL(zs_pool_stats);
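
/*
 * Minimal sketch of driving compaction by hand and then reading the
 * cumulative statistics (illustrative only; "pool" is assumed to be a
 * previously created struct zs_pool and the locals are hypothetical):
 *
 *	struct zs_pool_stats stats;
 *	unsigned long freed;
 *
 *	freed = zs_compact(pool);
 *	zs_pool_stats(pool, &stats);
 *	pr_info("freed %lu pages, %lu compacted in total\n",
 *		freed, atomic_long_read(&stats.pages_compacted));
 */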
2042
2043 static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
2044                 struct shrink_control *sc)
2045 {
2046         unsigned long pages_freed;
2047         struct zs_pool *pool = shrinker->private_data;
2048
2049         /*
2050          * Compact classes and calculate the compaction delta.
2051          * Can run concurrently with a compaction triggered
2052          * manually by the user.
2053          */
2054         pages_freed = zs_compact(pool);
2055
2056         return pages_freed ? pages_freed : SHRINK_STOP;
2057 }
2058
2059 static unsigned long zs_shrinker_count(struct shrinker *shrinker,
2060                 struct shrink_control *sc)
2061 {
2062         int i;
2063         struct size_class *class;
2064         unsigned long pages_to_free = 0;
2065         struct zs_pool *pool = shrinker->private_data;
2066
2067         for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2068                 class = pool->size_class[i];
2069                 if (class->index != i)
2070                         continue;
2071
2072                 pages_to_free += zs_can_compact(class);
2073         }
2074
2075         return pages_to_free;
2076 }
2077
2078 static void zs_unregister_shrinker(struct zs_pool *pool)
2079 {
2080         shrinker_free(pool->shrinker);
2081 }
2082
2083 static int zs_register_shrinker(struct zs_pool *pool)
2084 {
2085         pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name);
2086         if (!pool->shrinker)
2087                 return -ENOMEM;
2088
2089         pool->shrinker->scan_objects = zs_shrinker_scan;
2090         pool->shrinker->count_objects = zs_shrinker_count;
2091         pool->shrinker->batch = 0;
2092         pool->shrinker->private_data = pool;
2093
2094         shrinker_register(pool->shrinker);
2095
2096         return 0;
2097 }
2098
2099 static int calculate_zspage_chain_size(int class_size)
2100 {
2101         int i, min_waste = INT_MAX;
2102         int chain_size = 1;
2103
2104         if (is_power_of_2(class_size))
2105                 return chain_size;
2106
2107         for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
2108                 int waste;
2109
2110                 waste = (i * PAGE_SIZE) % class_size;
2111                 if (waste < min_waste) {
2112                         min_waste = waste;
2113                         chain_size = i;
2114                 }
2115         }
2116
2117         return chain_size;
2118 }
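
/*
 * Worked example (assuming PAGE_SIZE == 4096): for class_size == 720 the
 * waste is 4096 % 720 == 496 bytes for a 1-page chain, 8192 % 720 == 272
 * for 2 pages and 12288 % 720 == 48 for 3 pages, so for typical values of
 * ZS_MAX_PAGES_PER_ZSPAGE a 3-page zspage is chosen. Power-of-2 class
 * sizes divide PAGE_SIZE evenly and therefore always use a single page.
 */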
2119
2120 /**
2121  * zs_create_pool - Creates an allocation pool to work from.
2122  * @name: name of the pool to be created
2123  *
2124  * This function must be called before any other operation when using
2125  * the zsmalloc allocator.
2126  *
2127  * On success, a pointer to the newly created pool is returned,
2128  * otherwise NULL.
2129  */
2130 struct zs_pool *zs_create_pool(const char *name)
2131 {
2132         int i;
2133         struct zs_pool *pool;
2134         struct size_class *prev_class = NULL;
2135
2136         pool = kzalloc(sizeof(*pool), GFP_KERNEL);
2137         if (!pool)
2138                 return NULL;
2139
2140         init_deferred_free(pool);
2141         rwlock_init(&pool->migrate_lock);
2142         atomic_set(&pool->compaction_in_progress, 0);
2143
2144         pool->name = kstrdup(name, GFP_KERNEL);
2145         if (!pool->name)
2146                 goto err;
2147
2148         if (create_cache(pool))
2149                 goto err;
2150
2151         /*
2152          * Iterate in reverse, because the size of the size_class that we want
2153          * to use for merging should be larger than or equal to the current size.
2154          */
2155         for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
2156                 int size;
2157                 int pages_per_zspage;
2158                 int objs_per_zspage;
2159                 struct size_class *class;
2160                 int fullness;
2161
2162                 size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
2163                 if (size > ZS_MAX_ALLOC_SIZE)
2164                         size = ZS_MAX_ALLOC_SIZE;
2165                 pages_per_zspage = calculate_zspage_chain_size(size);
2166                 objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
2167
2168                 /*
2169                  * We iterate from biggest down to smallest classes,
2170                  * so huge_class_size holds the size of the first huge
2171                  * class. Any object bigger than or equal to that will
2172                  * end up in the huge class.
2173                  */
2174                 if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
2175                                 !huge_class_size) {
2176                         huge_class_size = size;
2177                         /*
2178                          * The object uses ZS_HANDLE_SIZE bytes to store the
2179                          * handle. We need to subtract it, because zs_malloc()
2180                          * unconditionally adds handle size before it performs
2181                          * size class search - so object may be smaller than
2182                          * huge class size, yet it still can end up in the huge
2183                          * class because it grows by ZS_HANDLE_SIZE extra bytes
2184                          * right before class lookup.
2185                          */
2186                         huge_class_size -= (ZS_HANDLE_SIZE - 1);
2187                 }
2188
2189                 /*
2190                  * size_class is used for normal zsmalloc operations such
2191                  * as alloc/free for that size. Although it is natural that we
2192                  * have one size_class for each size, there is a chance that we
2193                  * can get better memory utilization if we use one size_class
2194                  * for many different sizes whose size_classes have the same
2195                  * characteristics. So, we make size_class point to the
2196                  * previous size_class if possible.
2197                  */
2198                 if (prev_class) {
2199                         if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
2200                                 pool->size_class[i] = prev_class;
2201                                 continue;
2202                         }
2203                 }
2204
2205                 class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
2206                 if (!class)
2207                         goto err;
2208
2209                 class->size = size;
2210                 class->index = i;
2211                 class->pages_per_zspage = pages_per_zspage;
2212                 class->objs_per_zspage = objs_per_zspage;
2213                 spin_lock_init(&class->lock);
2214                 pool->size_class[i] = class;
2215
2216                 fullness = ZS_INUSE_RATIO_0;
2217                 while (fullness < NR_FULLNESS_GROUPS) {
2218                         INIT_LIST_HEAD(&class->fullness_list[fullness]);
2219                         fullness++;
2220                 }
2221
2222                 prev_class = class;
2223         }
2224
2225         /* debug only, don't abort if it fails */
2226         zs_pool_stat_create(pool, name);
2227
2228         /*
2229          * Not critical since the shrinker is only used to trigger internal
2230          * defragmentation of the pool, which is fairly optional. If
2231          * registration fails we can still use the pool normally and the user
2232          * can trigger compaction manually. Thus, ignore the return code.
2233          */
2234         zs_register_shrinker(pool);
2235
2236         return pool;
2237
2238 err:
2239         zs_destroy_pool(pool);
2240         return NULL;
2241 }
2242 EXPORT_SYMBOL_GPL(zs_create_pool);
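
/*
 * Minimal lifecycle sketch for a pool user (illustrative only; error
 * handling is elided and "my_pool", "src" and "len" are hypothetical):
 *
 *	struct zs_pool *my_pool = zs_create_pool("my_pool");
 *	unsigned long handle = zs_malloc(my_pool, len, GFP_KERNEL);
 *	void *dst = zs_map_object(my_pool, handle, ZS_MM_WO);
 *
 *	memcpy(dst, src, len);
 *	zs_unmap_object(my_pool, handle);
 *	...
 *	zs_free(my_pool, handle);
 *	zs_destroy_pool(my_pool);
 *
 * The mapping returned by zs_map_object() is atomic, so the caller must
 * not sleep between zs_map_object() and zs_unmap_object().
 */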
2243
2244 void zs_destroy_pool(struct zs_pool *pool)
2245 {
2246         int i;
2247
2248         zs_unregister_shrinker(pool);
2249         zs_flush_migration(pool);
2250         zs_pool_stat_destroy(pool);
2251
2252         for (i = 0; i < ZS_SIZE_CLASSES; i++) {
2253                 int fg;
2254                 struct size_class *class = pool->size_class[i];
2255
2256                 if (!class)
2257                         continue;
2258
2259                 if (class->index != i)
2260                         continue;
2261
2262                 for (fg = ZS_INUSE_RATIO_0; fg < NR_FULLNESS_GROUPS; fg++) {
2263                         if (list_empty(&class->fullness_list[fg]))
2264                                 continue;
2265
2266                         pr_err("Class-%d fullness group %d is not empty\n",
2267                                class->size, fg);
2268                 }
2269                 kfree(class);
2270         }
2271
2272         destroy_cache(pool);
2273         kfree(pool->name);
2274         kfree(pool);
2275 }
2276 EXPORT_SYMBOL_GPL(zs_destroy_pool);
2277
2278 static int __init zs_init(void)
2279 {
2280         int ret;
2281
2282         ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
2283                                 zs_cpu_prepare, zs_cpu_dead);
2284         if (ret)
2285                 goto out;
2286
2287 #ifdef CONFIG_ZPOOL
2288         zpool_register_driver(&zs_zpool_driver);
2289 #endif
2290
2291         zs_stat_init();
2292
2293         return 0;
2294
2295 out:
2296         return ret;
2297 }
2298
2299 static void __exit zs_exit(void)
2300 {
2301 #ifdef CONFIG_ZPOOL
2302         zpool_unregister_driver(&zs_zpool_driver);
2303 #endif
2304         cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
2305
2306         zs_stat_exit();
2307 }
2308
2309 module_init(zs_init);
2310 module_exit(zs_exit);
2311
2312 MODULE_LICENSE("Dual BSD/GPL");
2313 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
2314 MODULE_DESCRIPTION("zsmalloc memory allocator");