1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * zswap.c - zswap driver file
4  *
5  * zswap is a cache that takes pages that are in the process
6  * of being swapped out and attempts to compress and store them in a
7  * RAM-based memory pool.  This can result in a significant I/O reduction on
8  * the swap device and, in the case where decompressing from RAM is faster
9  * than reading from the swap device, can also improve workload performance.
10  *
11  * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/module.h>
17 #include <linux/cpu.h>
18 #include <linux/highmem.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/types.h>
22 #include <linux/atomic.h>
23 #include <linux/rbtree.h>
24 #include <linux/swap.h>
25 #include <linux/crypto.h>
26 #include <linux/scatterlist.h>
27 #include <linux/mempolicy.h>
28 #include <linux/mempool.h>
29 #include <linux/zpool.h>
30 #include <crypto/acompress.h>
31 #include <linux/zswap.h>
32 #include <linux/mm_types.h>
33 #include <linux/page-flags.h>
34 #include <linux/swapops.h>
35 #include <linux/writeback.h>
36 #include <linux/pagemap.h>
37 #include <linux/workqueue.h>
38 #include <linux/list_lru.h>
39
40 #include "swap.h"
41 #include "internal.h"
42
43 /*********************************
44 * statistics
45 **********************************/
46 /* Total bytes used by the compressed storage */
47 u64 zswap_pool_total_size;
48 /* The number of compressed pages currently stored in zswap */
49 atomic_t zswap_stored_pages = ATOMIC_INIT(0);
50 /* The number of same-value filled pages currently stored in zswap */
51 static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
52
53 /*
54  * The statistics below are not protected from concurrent access for
55  * performance reasons, so they may not be 100% accurate.  However,
56  * they do provide useful information on roughly how often a
57  * certain event occurs.
58 */
59
60 /* Pool limit was hit (see zswap_max_pool_percent) */
61 static u64 zswap_pool_limit_hit;
62 /* Pages written back when pool limit was reached */
63 static u64 zswap_written_back_pages;
64 /* Store failed due to a reclaim failure after pool limit was reached */
65 static u64 zswap_reject_reclaim_fail;
66 /* Store failed due to compression algorithm failure */
67 static u64 zswap_reject_compress_fail;
68 /* Compressed page was too big for the allocator to (optimally) store */
69 static u64 zswap_reject_compress_poor;
70 /* Store failed because underlying allocator could not get memory */
71 static u64 zswap_reject_alloc_fail;
72 /* Store failed because the entry metadata could not be allocated (rare) */
73 static u64 zswap_reject_kmemcache_fail;
74 /* Duplicate store was encountered (rare) */
75 static u64 zswap_duplicate_entry;
76
77 /* Shrinker work queue */
78 static struct workqueue_struct *shrink_wq;
79 /* Pool limit was hit, we need to calm down */
80 static bool zswap_pool_reached_full;
81
82 /*********************************
83 * tunables
84 **********************************/
85
86 #define ZSWAP_PARAM_UNSET ""
87
88 static int zswap_setup(void);
89
90 /* Enable/disable zswap */
91 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
92 static int zswap_enabled_param_set(const char *,
93                                    const struct kernel_param *);
94 static const struct kernel_param_ops zswap_enabled_param_ops = {
95         .set =          zswap_enabled_param_set,
96         .get =          param_get_bool,
97 };
98 module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
99
100 /* Crypto compressor to use */
101 static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
102 static int zswap_compressor_param_set(const char *,
103                                       const struct kernel_param *);
104 static const struct kernel_param_ops zswap_compressor_param_ops = {
105         .set =          zswap_compressor_param_set,
106         .get =          param_get_charp,
107         .free =         param_free_charp,
108 };
109 module_param_cb(compressor, &zswap_compressor_param_ops,
110                 &zswap_compressor, 0644);
111
112 /* Compressed storage zpool to use */
113 static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
114 static int zswap_zpool_param_set(const char *, const struct kernel_param *);
115 static const struct kernel_param_ops zswap_zpool_param_ops = {
116         .set =          zswap_zpool_param_set,
117         .get =          param_get_charp,
118         .free =         param_free_charp,
119 };
120 module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
121
122 /* The maximum percentage of memory that the compressed pool can occupy */
123 static unsigned int zswap_max_pool_percent = 20;
124 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
125
126 /* The threshold for accepting new pages after the max_pool_percent was hit */
127 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
128 module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
129                    uint, 0644);
130
131 /*
132  * Enable/disable handling same-value filled pages (enabled by default).
133  * If disabled, every page is considered non-same-value filled.
134  */
135 static bool zswap_same_filled_pages_enabled = true;
136 module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
137                    bool, 0644);
138
139 /* Enable/disable handling non-same-value filled pages (enabled by default) */
140 static bool zswap_non_same_filled_pages_enabled = true;
141 module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
142                    bool, 0644);
143
144 static bool zswap_exclusive_loads_enabled = IS_ENABLED(
145                 CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
146 module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
147
148 /* Number of zpools in zswap_pool (empirically determined for scalability) */
149 #define ZSWAP_NR_ZPOOLS 32
150
151 /* Enable/disable memory pressure-based shrinker. */
152 static bool zswap_shrinker_enabled = IS_ENABLED(
153                 CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
154 module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
155
156 /*********************************
157 * data structures
158 **********************************/
159
160 struct crypto_acomp_ctx {
161         struct crypto_acomp *acomp;
162         struct acomp_req *req;
163         struct crypto_wait wait;
164         u8 *buffer;
165         struct mutex mutex;
166 };
167
168 /*
169  * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
170  * The only case where lru_lock is not acquired while holding tree.lock is
171  * when a zswap_entry is taken off the lru for writeback; in that case it
172  * must be verified that the entry is still valid in the tree.
173  */
174 struct zswap_pool {
175         struct zpool *zpools[ZSWAP_NR_ZPOOLS];
176         struct crypto_acomp_ctx __percpu *acomp_ctx;
177         struct kref kref;
178         struct list_head list;
179         struct work_struct release_work;
180         struct work_struct shrink_work;
181         struct hlist_node node;
182         char tfm_name[CRYPTO_MAX_ALG_NAME];
183         struct list_lru list_lru;
184         struct mem_cgroup *next_shrink;
185         struct shrinker *shrinker;
186         atomic_t nr_stored;
187 };
188
189 /*
190  * struct zswap_entry
191  *
192  * This structure contains the metadata for tracking a single compressed
193  * page within zswap.
194  *
195  * rbnode - links the entry into red-black tree for the appropriate swap type
196  * swpentry - associated swap entry, the offset indexes into the red-black tree
197  * refcount - the number of outstanding references to the entry. This is needed
198  *            to protect against premature freeing of the entry by
199  *            concurrent calls to load, invalidate, and writeback.  The lock
200  *            for the zswap_tree structure that contains the entry must
201  *            be held while changing the refcount.  Since the lock must
202  *            be held, there is no reason to also make refcount atomic.
203  * length - the length in bytes of the compressed page data.  Needed during
204  *          decompression. For a same-value filled page, length is 0, and both
205  *          pool and lru are invalid and must be ignored.
206  * pool - the zswap_pool the entry's data is in
207  * handle - zpool allocation handle that stores the compressed page data
208  * value - the value that fills a same-value filled page
209  * objcg - the obj_cgroup that the compressed memory is charged to
210  * lru - handle to the pool's lru used to evict pages.
211  */
212 struct zswap_entry {
213         struct rb_node rbnode;
214         swp_entry_t swpentry;
215         int refcount;
216         unsigned int length;
217         struct zswap_pool *pool;
218         union {
219                 unsigned long handle;
220                 unsigned long value;
221         };
222         struct obj_cgroup *objcg;
223         struct list_head lru;
224 };
225
226 /*
227  * The tree lock in the zswap_tree struct protects a few things:
228  * - the rbtree
229  * - the refcount field of each entry in the tree
230  */
231 struct zswap_tree {
232         struct rb_root rbroot;
233         spinlock_t lock;
234 };
235
236 static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
237
238 /* RCU-protected iteration */
239 static LIST_HEAD(zswap_pools);
240 /* protects zswap_pools list modification */
241 static DEFINE_SPINLOCK(zswap_pools_lock);
242 /* pool counter to provide unique names to zpool */
243 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
244
245 enum zswap_init_type {
246         ZSWAP_UNINIT,
247         ZSWAP_INIT_SUCCEED,
248         ZSWAP_INIT_FAILED
249 };
250
251 static enum zswap_init_type zswap_init_state;
252
253 /* used to ensure the integrity of initialization */
254 static DEFINE_MUTEX(zswap_init_lock);
255
256 /* init completed, but couldn't create the initial pool */
257 static bool zswap_has_pool;
258
259 /*********************************
260 * helpers and fwd declarations
261 **********************************/
262
263 #define zswap_pool_debug(msg, p)                                \
264         pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
265                  zpool_get_type((p)->zpools[0]))
266
267 static int zswap_writeback_entry(struct zswap_entry *entry,
268                                  struct zswap_tree *tree);
269 static int zswap_pool_get(struct zswap_pool *pool);
270 static void zswap_pool_put(struct zswap_pool *pool);
271
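/* Has the compressed pool grown past max_pool_percent of total RAM? */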
272 static bool zswap_is_full(void)
273 {
274         return totalram_pages() * zswap_max_pool_percent / 100 <
275                         DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
276 }
277
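/* Is the pool back below accept_threshold_percent of its maximum size? */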
278 static bool zswap_can_accept(void)
279 {
280         return totalram_pages() * zswap_accept_thr_percent / 100 *
281                                 zswap_max_pool_percent / 100 >
282                         DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
283 }
284
285 static u64 get_zswap_pool_size(struct zswap_pool *pool)
286 {
287         u64 pool_size = 0;
288         int i;
289
290         for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
291                 pool_size += zpool_get_total_size(pool->zpools[i]);
292
293         return pool_size;
294 }
295
296 static void zswap_update_total_size(void)
297 {
298         struct zswap_pool *pool;
299         u64 total = 0;
300
301         rcu_read_lock();
302
303         list_for_each_entry_rcu(pool, &zswap_pools, list)
304                 total += get_zswap_pool_size(pool);
305
306         rcu_read_unlock();
307
308         zswap_pool_total_size = total;
309 }
310
311 /* should be called under RCU */
312 #ifdef CONFIG_MEMCG
313 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
314 {
315         return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
316 }
317 #else
318 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
319 {
320         return NULL;
321 }
322 #endif
323
324 static inline int entry_to_nid(struct zswap_entry *entry)
325 {
326         return page_to_nid(virt_to_page(entry));
327 }
328
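/* Advance any pool's next_shrink iterator past a memcg that is going offline */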
329 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
330 {
331         struct zswap_pool *pool;
332
333         /* lock out zswap pools list modification */
334         spin_lock(&zswap_pools_lock);
335         list_for_each_entry(pool, &zswap_pools, list) {
336                 if (pool->next_shrink == memcg)
337                         pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
338         }
339         spin_unlock(&zswap_pools_lock);
340 }
341
342 /*********************************
343 * zswap entry functions
344 **********************************/
345 static struct kmem_cache *zswap_entry_cache;
346
347 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
348 {
349         struct zswap_entry *entry;
350         entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
351         if (!entry)
352                 return NULL;
353         entry->refcount = 1;
354         RB_CLEAR_NODE(&entry->rbnode);
355         return entry;
356 }
357
358 static void zswap_entry_cache_free(struct zswap_entry *entry)
359 {
360         kmem_cache_free(zswap_entry_cache, entry);
361 }
362
363 /*********************************
364 * zswap lruvec functions
365 **********************************/
366 void zswap_lruvec_state_init(struct lruvec *lruvec)
367 {
368         atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
369 }
370
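/* A swapin grows the protection window of the folio's lruvec */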
371 void zswap_folio_swapin(struct folio *folio)
372 {
373         struct lruvec *lruvec;
374
375         if (folio) {
376                 lruvec = folio_lruvec(folio);
377                 atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
378         }
379 }
380
381 /*********************************
382 * lru functions
383 **********************************/
384 static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
385 {
386         atomic_long_t *nr_zswap_protected;
387         unsigned long lru_size, old, new;
388         int nid = entry_to_nid(entry);
389         struct mem_cgroup *memcg;
390         struct lruvec *lruvec;
391
392         /*
393          * Note that it is safe to use rcu_read_lock() here, even in the face of
394          * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
395          * used in list_lru lookup, only two scenarios are possible:
396          *
397          * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
398          *    new entry will be reparented to memcg's parent's list_lru.
399          * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
400          *    new entry will be added directly to memcg's parent's list_lru.
401          *
402          * Similar reasoning holds for list_lru_del() and list_lru_putback().
403          */
404         rcu_read_lock();
405         memcg = mem_cgroup_from_entry(entry);
406         /* will always succeed */
407         list_lru_add(list_lru, &entry->lru, nid, memcg);
408
409         /* Update the protection area */
410         lru_size = list_lru_count_one(list_lru, nid, memcg);
411         lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
412         nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
413         old = atomic_long_inc_return(nr_zswap_protected);
414         /*
415          * Decay to avoid overflow and adapt to changing workloads.
416          * This is based on LRU reclaim cost decaying heuristics.
417          */
418         do {
419                 new = old > lru_size / 4 ? old / 2 : old;
420         } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
421         rcu_read_unlock();
422 }
423
424 static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
425 {
426         int nid = entry_to_nid(entry);
427         struct mem_cgroup *memcg;
428
429         rcu_read_lock();
430         memcg = mem_cgroup_from_entry(entry);
431         /* will always succeed */
432         list_lru_del(list_lru, &entry->lru, nid, memcg);
433         rcu_read_unlock();
434 }
435
436 static void zswap_lru_putback(struct list_lru *list_lru,
437                 struct zswap_entry *entry)
438 {
439         int nid = entry_to_nid(entry);
440         spinlock_t *lock = &list_lru->node[nid].lock;
441         struct mem_cgroup *memcg;
442         struct lruvec *lruvec;
443
444         rcu_read_lock();
445         memcg = mem_cgroup_from_entry(entry);
446         spin_lock(lock);
447         /* we cannot use list_lru_add here, because it increments node's lru count */
448         list_lru_putback(list_lru, &entry->lru, nid, memcg);
449         spin_unlock(lock);
450
451         lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(entry_to_nid(entry)));
452         /* increment the protection area to account for the LRU rotation. */
453         atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
454         rcu_read_unlock();
455 }
456
457 /*********************************
458 * rbtree functions
459 **********************************/
460 static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
461 {
462         struct rb_node *node = root->rb_node;
463         struct zswap_entry *entry;
464         pgoff_t entry_offset;
465
466         while (node) {
467                 entry = rb_entry(node, struct zswap_entry, rbnode);
468                 entry_offset = swp_offset(entry->swpentry);
469                 if (entry_offset > offset)
470                         node = node->rb_left;
471                 else if (entry_offset < offset)
472                         node = node->rb_right;
473                 else
474                         return entry;
475         }
476         return NULL;
477 }
478
479 /*
480  * In the case that an entry with the same offset is found, a pointer to
481  * the existing entry is stored in dupentry and the function returns -EEXIST.
482  */
483 static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
484                         struct zswap_entry **dupentry)
485 {
486         struct rb_node **link = &root->rb_node, *parent = NULL;
487         struct zswap_entry *myentry;
488         pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
489
490         while (*link) {
491                 parent = *link;
492                 myentry = rb_entry(parent, struct zswap_entry, rbnode);
493                 myentry_offset = swp_offset(myentry->swpentry);
494                 if (myentry_offset > entry_offset)
495                         link = &(*link)->rb_left;
496                 else if (myentry_offset < entry_offset)
497                         link = &(*link)->rb_right;
498                 else {
499                         *dupentry = myentry;
500                         return -EEXIST;
501                 }
502         }
503         rb_link_node(&entry->rbnode, parent, link);
504         rb_insert_color(&entry->rbnode, root);
505         return 0;
506 }
507
508 static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
509 {
510         if (!RB_EMPTY_NODE(&entry->rbnode)) {
511                 rb_erase(&entry->rbnode, root);
512                 RB_CLEAR_NODE(&entry->rbnode);
513                 return true;
514         }
515         return false;
516 }
517
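/* Pick the zpool backing this entry by hashing the entry pointer */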
518 static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
519 {
520         int i = 0;
521
522         if (ZSWAP_NR_ZPOOLS > 1)
523                 i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));
524
525         return entry->pool->zpools[i];
526 }
527
528 /*
529  * Carries out the common pattern of freeing an entry's zpool allocation,
530  * freeing the entry itself, and decrementing the number of stored pages.
531  */
532 static void zswap_free_entry(struct zswap_entry *entry)
533 {
534         if (entry->objcg) {
535                 obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
536                 obj_cgroup_put(entry->objcg);
537         }
538         if (!entry->length)
539                 atomic_dec(&zswap_same_filled_pages);
540         else {
541                 zswap_lru_del(&entry->pool->list_lru, entry);
542                 zpool_free(zswap_find_zpool(entry), entry->handle);
543                 atomic_dec(&entry->pool->nr_stored);
544                 zswap_pool_put(entry->pool);
545         }
546         zswap_entry_cache_free(entry);
547         atomic_dec(&zswap_stored_pages);
548         zswap_update_total_size();
549 }
550
551 /* caller must hold the tree lock */
552 static void zswap_entry_get(struct zswap_entry *entry)
553 {
554         entry->refcount++;
555 }
556
557 /* caller must hold the tree lock;
558  * remove from the tree and free it if nobody references the entry
559  */
560 static void zswap_entry_put(struct zswap_tree *tree,
561                         struct zswap_entry *entry)
562 {
563         int refcount = --entry->refcount;
564
565         WARN_ON_ONCE(refcount < 0);
566         if (refcount == 0) {
567                 WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode));
568                 zswap_free_entry(entry);
569         }
570 }
571
572 /* caller must hold the tree lock */
573 static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
574                                 pgoff_t offset)
575 {
576         struct zswap_entry *entry;
577
578         entry = zswap_rb_search(root, offset);
579         if (entry)
580                 zswap_entry_get(entry);
581
582         return entry;
583 }
584
585 /*********************************
586 * shrinker functions
587 **********************************/
588 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
589                                        spinlock_t *lock, void *arg);
590
591 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
592                 struct shrink_control *sc)
593 {
594         struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
595         unsigned long shrink_ret, nr_protected, lru_size;
596         struct zswap_pool *pool = shrinker->private_data;
597         bool encountered_page_in_swapcache = false;
598
599         if (!zswap_shrinker_enabled) {
600                 sc->nr_scanned = 0;
601                 return SHRINK_STOP;
602         }
603
604         nr_protected =
605                 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
606         lru_size = list_lru_shrink_count(&pool->list_lru, sc);
607
608         /*
609          * Abort if we are shrinking into the protected region.
610          *
611  * This short-circuiting is necessary because if we have too many
612  * concurrent reclaimers getting the freeable zswap object counts at the
613  * same time (before any of them has made reasonable progress), the total
614  * number of reclaimed objects might be more than the number of unprotected
615  * objects (i.e. the reclaimers will reclaim into the protected area of the
616          * zswap LRU).
617          */
618         if (nr_protected >= lru_size - sc->nr_to_scan) {
619                 sc->nr_scanned = 0;
620                 return SHRINK_STOP;
621         }
622
623         shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
624                 &encountered_page_in_swapcache);
625
626         if (encountered_page_in_swapcache)
627                 return SHRINK_STOP;
628
629         return shrink_ret ? shrink_ret : SHRINK_STOP;
630 }
631
632 static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
633                 struct shrink_control *sc)
634 {
635         struct zswap_pool *pool = shrinker->private_data;
636         struct mem_cgroup *memcg = sc->memcg;
637         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
638         unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
639
640         if (!zswap_shrinker_enabled)
641                 return 0;
642
643 #ifdef CONFIG_MEMCG_KMEM
644         mem_cgroup_flush_stats(memcg);
645         nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
646         nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
647 #else
648         /* use pool stats instead of memcg stats */
649         nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
650         nr_stored = atomic_read(&pool->nr_stored);
651 #endif
652
653         if (!nr_stored)
654                 return 0;
655
656         nr_protected =
657                 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
658         nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
659         /*
660          * Subtract from the lru size an estimate of the number of pages
661          * that should be protected.
662          */
663         nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
664
665         /*
666          * Scale the number of freeable pages by the memory saving factor.
667          * This ensures that the better zswap compresses memory, the fewer
668          * pages we will evict to swap (as it will otherwise incur IO for
669          * relatively small memory saving).
670          */
671         return mult_frac(nr_freeable, nr_backing, nr_stored);
672 }
673
674 static void zswap_alloc_shrinker(struct zswap_pool *pool)
675 {
676         pool->shrinker =
677                 shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
678         if (!pool->shrinker)
679                 return;
680
681         pool->shrinker->private_data = pool;
682         pool->shrinker->scan_objects = zswap_shrinker_scan;
683         pool->shrinker->count_objects = zswap_shrinker_count;
684         pool->shrinker->batch = 0;
685         pool->shrinker->seeks = DEFAULT_SEEKS;
686 }
687
688 /*********************************
689 * per-cpu code
690 **********************************/
691 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
692 {
693         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
694         struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
695         struct crypto_acomp *acomp;
696         struct acomp_req *req;
697         int ret;
698
699         mutex_init(&acomp_ctx->mutex);
700
701         acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
702         if (!acomp_ctx->buffer)
703                 return -ENOMEM;
704
705         acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
706         if (IS_ERR(acomp)) {
707                 pr_err("could not alloc crypto acomp %s : %ld\n",
708                                 pool->tfm_name, PTR_ERR(acomp));
709                 ret = PTR_ERR(acomp);
710                 goto acomp_fail;
711         }
712         acomp_ctx->acomp = acomp;
713
714         req = acomp_request_alloc(acomp_ctx->acomp);
715         if (!req) {
716                 pr_err("could not alloc crypto acomp_request %s\n",
717                        pool->tfm_name);
718                 ret = -ENOMEM;
719                 goto req_fail;
720         }
721         acomp_ctx->req = req;
722
723         crypto_init_wait(&acomp_ctx->wait);
724         /*
725          * If the acomp backend is an async zip driver, crypto_req_done() will
726          * wake up crypto_wait_req(); if the backend is scomp, the callback
727          * won't be called and crypto_wait_req() will return without blocking.
728          */
729         acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
730                                    crypto_req_done, &acomp_ctx->wait);
731
732         return 0;
733
734 req_fail:
735         crypto_free_acomp(acomp_ctx->acomp);
736 acomp_fail:
737         kfree(acomp_ctx->buffer);
738         return ret;
739 }
740
741 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
742 {
743         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
744         struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
745
746         if (!IS_ERR_OR_NULL(acomp_ctx)) {
747                 if (!IS_ERR_OR_NULL(acomp_ctx->req))
748                         acomp_request_free(acomp_ctx->req);
749                 if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
750                         crypto_free_acomp(acomp_ctx->acomp);
751                 kfree(acomp_ctx->buffer);
752         }
753
754         return 0;
755 }
756
757 /*********************************
758 * pool functions
759 **********************************/
760
761 static struct zswap_pool *__zswap_pool_current(void)
762 {
763         struct zswap_pool *pool;
764
765         pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
766         WARN_ONCE(!pool && zswap_has_pool,
767                   "%s: no page storage pool!\n", __func__);
768
769         return pool;
770 }
771
772 static struct zswap_pool *zswap_pool_current(void)
773 {
774         assert_spin_locked(&zswap_pools_lock);
775
776         return __zswap_pool_current();
777 }
778
779 static struct zswap_pool *zswap_pool_current_get(void)
780 {
781         struct zswap_pool *pool;
782
783         rcu_read_lock();
784
785         pool = __zswap_pool_current();
786         if (!zswap_pool_get(pool))
787                 pool = NULL;
788
789         rcu_read_unlock();
790
791         return pool;
792 }
793
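/* Return the last pool on the zswap_pools list, with a reference held (or NULL) */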
794 static struct zswap_pool *zswap_pool_last_get(void)
795 {
796         struct zswap_pool *pool, *last = NULL;
797
798         rcu_read_lock();
799
800         list_for_each_entry_rcu(pool, &zswap_pools, list)
801                 last = pool;
802         WARN_ONCE(!last && zswap_has_pool,
803                   "%s: no page storage pool!\n", __func__);
804         if (!zswap_pool_get(last))
805                 last = NULL;
806
807         rcu_read_unlock();
808
809         return last;
810 }
811
812 /* type and compressor must be null-terminated */
813 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
814 {
815         struct zswap_pool *pool;
816
817         assert_spin_locked(&zswap_pools_lock);
818
819         list_for_each_entry_rcu(pool, &zswap_pools, list) {
820                 if (strcmp(pool->tfm_name, compressor))
821                         continue;
822                 /* all zpools share the same type */
823                 if (strcmp(zpool_get_type(pool->zpools[0]), type))
824                         continue;
825                 /* if we can't get it, it's about to be destroyed */
826                 if (!zswap_pool_get(pool))
827                         continue;
828                 return pool;
829         }
830
831         return NULL;
832 }
833
834 /*
835  * If the entry is still valid in the tree, drop the initial ref and remove it
836  * from the tree. This function must be called with an additional ref held,
837  * otherwise it may race with another invalidation freeing the entry.
838  */
839 static void zswap_invalidate_entry(struct zswap_tree *tree,
840                                    struct zswap_entry *entry)
841 {
842         if (zswap_rb_erase(&tree->rbroot, entry))
843                 zswap_entry_put(tree, entry);
844 }
845
846 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
847                                        spinlock_t *lock, void *arg)
848 {
849         struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
850         bool *encountered_page_in_swapcache = (bool *)arg;
851         struct zswap_tree *tree;
852         pgoff_t swpoffset;
853         enum lru_status ret = LRU_REMOVED_RETRY;
854         int writeback_result;
855
856         /*
857          * Once the lru lock is dropped, the entry might get freed. The
858          * swpoffset is copied to the stack, and entry isn't deref'd again
859          * until the entry is verified to still be alive in the tree.
860          */
861         swpoffset = swp_offset(entry->swpentry);
862         tree = zswap_trees[swp_type(entry->swpentry)];
863         list_lru_isolate(l, item);
864         /*
865          * It's safe to drop the lock here because we return either
866          * LRU_REMOVED_RETRY or LRU_RETRY.
867          */
868         spin_unlock(lock);
869
870         /* Check for invalidate() race */
871         spin_lock(&tree->lock);
872         if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
873                 goto unlock;
874
875         /* Hold a reference to prevent a free during writeback */
876         zswap_entry_get(entry);
877         spin_unlock(&tree->lock);
878
879         writeback_result = zswap_writeback_entry(entry, tree);
880
881         spin_lock(&tree->lock);
882         if (writeback_result) {
883                 zswap_reject_reclaim_fail++;
884                 zswap_lru_putback(&entry->pool->list_lru, entry);
885                 ret = LRU_RETRY;
886
887                 /*
888                  * Encountering a page already in swap cache is a sign that we are shrinking
889                  * into the warmer region. We should terminate shrinking (if we're in the dynamic
890                  * shrinker context).
891                  */
892                 if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
893                         ret = LRU_SKIP;
894                         *encountered_page_in_swapcache = true;
895                 }
896
897                 goto put_unlock;
898         }
899         zswap_written_back_pages++;
900
901         if (entry->objcg)
902                 count_objcg_event(entry->objcg, ZSWPWB);
903
904         count_vm_event(ZSWPWB);
905         /*
906          * Writeback started successfully, the page now belongs to the
907          * swapcache. Drop the entry from zswap - unless invalidate already
908          * took it out while we had the tree->lock released for IO.
909          */
910         zswap_invalidate_entry(tree, entry);
911
912 put_unlock:
913         /* Drop local reference */
914         zswap_entry_put(tree, entry);
915 unlock:
916         spin_unlock(&tree->lock);
917         spin_lock(lock);
918         return ret;
919 }
920
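/* Try to write back one zswap entry per NUMA node from this memcg's LRU */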
921 static int shrink_memcg(struct mem_cgroup *memcg)
922 {
923         struct zswap_pool *pool;
924         int nid, shrunk = 0;
925
926         /*
927          * Skip zombies because their LRUs are reparented and we would be
928          * reclaiming from the parent instead of the dead memcg.
929          */
930         if (memcg && !mem_cgroup_online(memcg))
931                 return -ENOENT;
932
933         pool = zswap_pool_current_get();
934         if (!pool)
935                 return -EINVAL;
936
937         for_each_node_state(nid, N_NORMAL_MEMORY) {
938                 unsigned long nr_to_walk = 1;
939
940                 shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
941                                             &shrink_memcg_cb, NULL, &nr_to_walk);
942         }
943         zswap_pool_put(pool);
944         return shrunk ? 0 : -EAGAIN;
945 }
946
947 static void shrink_worker(struct work_struct *w)
948 {
949         struct zswap_pool *pool = container_of(w, typeof(*pool),
950                                                 shrink_work);
951         struct mem_cgroup *memcg;
952         int ret, failures = 0;
953
954         /* global reclaim will select cgroup in a round-robin fashion. */
955         do {
956                 spin_lock(&zswap_pools_lock);
957                 pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
958                 memcg = pool->next_shrink;
959
960                 /*
961                  * We need to retry if we have gone through a full round trip, or if we
962                  * got an offline memcg (or else we risk undoing the effect of the
963                  * zswap memcg offlining cleanup callback). This is not catastrophic
964                  * per se, but it will keep the now offlined memcg hostage for a while.
965                  *
966                  * Note that if we got an online memcg, we will keep the extra
967                  * reference in case the original reference obtained by mem_cgroup_iter
968                  * is dropped by the zswap memcg offlining callback, ensuring that the
969                  * memcg is not killed when we are reclaiming.
970                  */
971                 if (!memcg) {
972                         spin_unlock(&zswap_pools_lock);
973                         if (++failures == MAX_RECLAIM_RETRIES)
974                                 break;
975
976                         goto resched;
977                 }
978
979                 if (!mem_cgroup_tryget_online(memcg)) {
980                         /* drop the reference from mem_cgroup_iter() */
981                         mem_cgroup_iter_break(NULL, memcg);
982                         pool->next_shrink = NULL;
983                         spin_unlock(&zswap_pools_lock);
984
985                         if (++failures == MAX_RECLAIM_RETRIES)
986                                 break;
987
988                         goto resched;
989                 }
990                 spin_unlock(&zswap_pools_lock);
991
992                 ret = shrink_memcg(memcg);
993                 /* drop the extra reference */
994                 mem_cgroup_put(memcg);
995
996                 if (ret == -EINVAL)
997                         break;
998                 if (ret && ++failures == MAX_RECLAIM_RETRIES)
999                         break;
1000
1001 resched:
1002                 cond_resched();
1003         } while (!zswap_can_accept());
1004         zswap_pool_put(pool);
1005 }
1006
1007 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
1008 {
1009         int i;
1010         struct zswap_pool *pool;
1011         char name[38]; /* 'zswap' + 32 char (max) num + \0 */
1012         gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
1013         int ret;
1014
1015         if (!zswap_has_pool) {
1016                 /* if either is unset, pool initialization failed, and we
1017                  * need both params to be set correctly before trying to
1018                  * create a pool.
1019                  */
1020                 if (!strcmp(type, ZSWAP_PARAM_UNSET))
1021                         return NULL;
1022                 if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
1023                         return NULL;
1024         }
1025
1026         pool = kzalloc(sizeof(*pool), GFP_KERNEL);
1027         if (!pool)
1028                 return NULL;
1029
1030         for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
1031                 /* a unique name for each pool is specifically required by zsmalloc */
1032                 snprintf(name, 38, "zswap%x",
1033                          atomic_inc_return(&zswap_pools_count));
1034
1035                 pool->zpools[i] = zpool_create_pool(type, name, gfp);
1036                 if (!pool->zpools[i]) {
1037                         pr_err("%s zpool not available\n", type);
1038                         goto error;
1039                 }
1040         }
1041         pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
1042
1043         strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
1044
1045         pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
1046         if (!pool->acomp_ctx) {
1047                 pr_err("percpu alloc failed\n");
1048                 goto error;
1049         }
1050
1051         ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
1052                                        &pool->node);
1053         if (ret)
1054                 goto error;
1055
1056         zswap_alloc_shrinker(pool);
1057         if (!pool->shrinker)
1058                 goto error;
1059
1060         pr_debug("using %s compressor\n", pool->tfm_name);
1061
1062         /* being the current pool takes 1 ref; this func expects the
1063          * caller to always add the new pool as the current pool
1064          */
1065         kref_init(&pool->kref);
1066         INIT_LIST_HEAD(&pool->list);
1067         if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
1068                 goto lru_fail;
1069         shrinker_register(pool->shrinker);
1070         INIT_WORK(&pool->shrink_work, shrink_worker);
1071         atomic_set(&pool->nr_stored, 0);
1072
1073         zswap_pool_debug("created", pool);
1074
1075         return pool;
1076
1077 lru_fail:
1078         list_lru_destroy(&pool->list_lru);
1079         shrinker_free(pool->shrinker);
1080 error:
1081         if (pool->acomp_ctx)
1082                 free_percpu(pool->acomp_ctx);
1083         while (i--)
1084                 zpool_destroy_pool(pool->zpools[i]);
1085         kfree(pool);
1086         return NULL;
1087 }
1088
1089 static struct zswap_pool *__zswap_pool_create_fallback(void)
1090 {
1091         bool has_comp, has_zpool;
1092
1093         has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
1094         if (!has_comp && strcmp(zswap_compressor,
1095                                 CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
1096                 pr_err("compressor %s not available, using default %s\n",
1097                        zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
1098                 param_free_charp(&zswap_compressor);
1099                 zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
1100                 has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
1101         }
1102         if (!has_comp) {
1103                 pr_err("default compressor %s not available\n",
1104                        zswap_compressor);
1105                 param_free_charp(&zswap_compressor);
1106                 zswap_compressor = ZSWAP_PARAM_UNSET;
1107         }
1108
1109         has_zpool = zpool_has_pool(zswap_zpool_type);
1110         if (!has_zpool && strcmp(zswap_zpool_type,
1111                                  CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
1112                 pr_err("zpool %s not available, using default %s\n",
1113                        zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
1114                 param_free_charp(&zswap_zpool_type);
1115                 zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
1116                 has_zpool = zpool_has_pool(zswap_zpool_type);
1117         }
1118         if (!has_zpool) {
1119                 pr_err("default zpool %s not available\n",
1120                        zswap_zpool_type);
1121                 param_free_charp(&zswap_zpool_type);
1122                 zswap_zpool_type = ZSWAP_PARAM_UNSET;
1123         }
1124
1125         if (!has_comp || !has_zpool)
1126                 return NULL;
1127
1128         return zswap_pool_create(zswap_zpool_type, zswap_compressor);
1129 }
1130
1131 static void zswap_pool_destroy(struct zswap_pool *pool)
1132 {
1133         int i;
1134
1135         zswap_pool_debug("destroying", pool);
1136
1137         shrinker_free(pool->shrinker);
1138         cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
1139         free_percpu(pool->acomp_ctx);
1140         list_lru_destroy(&pool->list_lru);
1141
1142         spin_lock(&zswap_pools_lock);
1143         mem_cgroup_iter_break(NULL, pool->next_shrink);
1144         pool->next_shrink = NULL;
1145         spin_unlock(&zswap_pools_lock);
1146
1147         for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
1148                 zpool_destroy_pool(pool->zpools[i]);
1149         kfree(pool);
1150 }
1151
1152 static int __must_check zswap_pool_get(struct zswap_pool *pool)
1153 {
1154         if (!pool)
1155                 return 0;
1156
1157         return kref_get_unless_zero(&pool->kref);
1158 }
1159
1160 static void __zswap_pool_release(struct work_struct *work)
1161 {
1162         struct zswap_pool *pool = container_of(work, typeof(*pool),
1163                                                 release_work);
1164
1165         synchronize_rcu();
1166
1167         /* nobody should have been able to get a kref... */
1168         WARN_ON(kref_get_unless_zero(&pool->kref));
1169
1170         /* pool is now off zswap_pools list and has no references. */
1171         zswap_pool_destroy(pool);
1172 }
1173
1174 static void __zswap_pool_empty(struct kref *kref)
1175 {
1176         struct zswap_pool *pool;
1177
1178         pool = container_of(kref, typeof(*pool), kref);
1179
1180         spin_lock(&zswap_pools_lock);
1181
1182         WARN_ON(pool == zswap_pool_current());
1183
1184         list_del_rcu(&pool->list);
1185
1186         INIT_WORK(&pool->release_work, __zswap_pool_release);
1187         schedule_work(&pool->release_work);
1188
1189         spin_unlock(&zswap_pools_lock);
1190 }
1191
1192 static void zswap_pool_put(struct zswap_pool *pool)
1193 {
1194         kref_put(&pool->kref, __zswap_pool_empty);
1195 }
1196
1197 /*********************************
1198 * param callbacks
1199 **********************************/
1200
1201 static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
1202 {
1203         /* no change required */
1204         if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
1205                 return false;
1206         return true;
1207 }
1208
1209 /* val must be a null-terminated string */
1210 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
1211                              char *type, char *compressor)
1212 {
1213         struct zswap_pool *pool, *put_pool = NULL;
1214         char *s = strstrip((char *)val);
1215         int ret = 0;
1216         bool new_pool = false;
1217
1218         mutex_lock(&zswap_init_lock);
1219         switch (zswap_init_state) {
1220         case ZSWAP_UNINIT:
1221                 /* if this is load-time (pre-init) param setting,
1222                  * don't create a pool; that's done during init.
1223                  */
1224                 ret = param_set_charp(s, kp);
1225                 break;
1226         case ZSWAP_INIT_SUCCEED:
1227                 new_pool = zswap_pool_changed(s, kp);
1228                 break;
1229         case ZSWAP_INIT_FAILED:
1230                 pr_err("can't set param, initialization failed\n");
1231                 ret = -ENODEV;
1232         }
1233         mutex_unlock(&zswap_init_lock);
1234
1235         /* no need to create a new pool, return directly */
1236         if (!new_pool)
1237                 return ret;
1238
1239         if (!type) {
1240                 if (!zpool_has_pool(s)) {
1241                         pr_err("zpool %s not available\n", s);
1242                         return -ENOENT;
1243                 }
1244                 type = s;
1245         } else if (!compressor) {
1246                 if (!crypto_has_acomp(s, 0, 0)) {
1247                         pr_err("compressor %s not available\n", s);
1248                         return -ENOENT;
1249                 }
1250                 compressor = s;
1251         } else {
1252                 WARN_ON(1);
1253                 return -EINVAL;
1254         }
1255
1256         spin_lock(&zswap_pools_lock);
1257
1258         pool = zswap_pool_find_get(type, compressor);
1259         if (pool) {
1260                 zswap_pool_debug("using existing", pool);
1261                 WARN_ON(pool == zswap_pool_current());
1262                 list_del_rcu(&pool->list);
1263         }
1264
1265         spin_unlock(&zswap_pools_lock);
1266
1267         if (!pool)
1268                 pool = zswap_pool_create(type, compressor);
1269
1270         if (pool)
1271                 ret = param_set_charp(s, kp);
1272         else
1273                 ret = -EINVAL;
1274
1275         spin_lock(&zswap_pools_lock);
1276
1277         if (!ret) {
1278                 put_pool = zswap_pool_current();
1279                 list_add_rcu(&pool->list, &zswap_pools);
1280                 zswap_has_pool = true;
1281         } else if (pool) {
1282                 /* add the possibly pre-existing pool to the end of the pools
1283                  * list; if it's new (and empty) then it'll be removed and
1284                  * destroyed by the put after we drop the lock
1285                  */
1286                 list_add_tail_rcu(&pool->list, &zswap_pools);
1287                 put_pool = pool;
1288         }
1289
1290         spin_unlock(&zswap_pools_lock);
1291
1292         if (!zswap_has_pool && !pool) {
1293                 /* if initial pool creation failed, and this pool creation also
1294                  * failed, maybe both compressor and zpool params were bad.
1295                  * Allow changing this param, so pool creation will succeed
1296                  * when the other param is changed. We already verified this
1297                  * param is ok in the zpool_has_pool() or crypto_has_acomp()
1298                  * checks above.
1299                  */
1300                 ret = param_set_charp(s, kp);
1301         }
1302
1303         /* drop the ref from either the old current pool,
1304          * or the new pool we failed to add
1305          */
1306         if (put_pool)
1307                 zswap_pool_put(put_pool);
1308
1309         return ret;
1310 }
1311
1312 static int zswap_compressor_param_set(const char *val,
1313                                       const struct kernel_param *kp)
1314 {
1315         return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
1316 }
1317
1318 static int zswap_zpool_param_set(const char *val,
1319                                  const struct kernel_param *kp)
1320 {
1321         return __zswap_param_set(val, kp, NULL, zswap_compressor);
1322 }
1323
1324 static int zswap_enabled_param_set(const char *val,
1325                                    const struct kernel_param *kp)
1326 {
1327         int ret = -ENODEV;
1328
1329         /* if this is load-time (pre-init) param setting, only set param. */
1330         if (system_state != SYSTEM_RUNNING)
1331                 return param_set_bool(val, kp);
1332
1333         mutex_lock(&zswap_init_lock);
1334         switch (zswap_init_state) {
1335         case ZSWAP_UNINIT:
1336                 if (zswap_setup())
1337                         break;
1338                 fallthrough;
1339         case ZSWAP_INIT_SUCCEED:
1340                 if (!zswap_has_pool)
1341                         pr_err("can't enable, no pool configured\n");
1342                 else
1343                         ret = param_set_bool(val, kp);
1344                 break;
1345         case ZSWAP_INIT_FAILED:
1346                 pr_err("can't enable, initialization failed\n");
1347         }
1348         mutex_unlock(&zswap_init_lock);
1349
1350         return ret;
1351 }
1352
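/* Decompress the entry's data from its zpool handle into the given page */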
1353 static void __zswap_load(struct zswap_entry *entry, struct page *page)
1354 {
1355         struct zpool *zpool = zswap_find_zpool(entry);
1356         struct scatterlist input, output;
1357         struct crypto_acomp_ctx *acomp_ctx;
1358         u8 *src;
1359
1360         acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1361         mutex_lock(&acomp_ctx->mutex);
1362
1363         src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
1364         if (!zpool_can_sleep_mapped(zpool)) {
1365                 memcpy(acomp_ctx->buffer, src, entry->length);
1366                 src = acomp_ctx->buffer;
1367                 zpool_unmap_handle(zpool, entry->handle);
1368         }
1369
1370         sg_init_one(&input, src, entry->length);
1371         sg_init_table(&output, 1);
1372         sg_set_page(&output, page, PAGE_SIZE, 0);
1373         acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1374         BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1375         BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1376         mutex_unlock(&acomp_ctx->mutex);
1377
1378         if (zpool_can_sleep_mapped(zpool))
1379                 zpool_unmap_handle(zpool, entry->handle);
1380 }
1381
1382 /*********************************
1383 * writeback code
1384 **********************************/
1385 /*
1386  * Attempts to free an entry by adding a folio to the swap cache,
1387  * decompressing the entry data into the folio, and issuing a
1388  * bio write to write the folio back to the swap device.
1389  *
1390  * This can be thought of as a "resumed writeback" of the folio
1391  * to the swap device.  We are basically resuming the same swap
1392  * writeback path that was intercepted with the zswap_store()
1393  * in the first place.  After the folio has been decompressed into
1394  * the swap cache, the compressed version stored by zswap can be
1395  * freed.
1396  */
1397 static int zswap_writeback_entry(struct zswap_entry *entry,
1398                                  struct zswap_tree *tree)
1399 {
1400         swp_entry_t swpentry = entry->swpentry;
1401         struct folio *folio;
1402         struct mempolicy *mpol;
1403         bool folio_was_allocated;
1404         struct writeback_control wbc = {
1405                 .sync_mode = WB_SYNC_NONE,
1406         };
1407
1408         /* try to allocate swap cache folio */
1409         mpol = get_task_policy(current);
1410         folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1411                                 NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1412         if (!folio)
1413                 return -ENOMEM;
1414
1415         /*
1416          * Found an existing folio; we raced with load/swapin. We generally
1417          * writeback cold folios from zswap, and swapin means the folio just
1418          * became hot. Skip this folio and let the caller find another one.
1419          */
1420         if (!folio_was_allocated) {
1421                 folio_put(folio);
1422                 return -EEXIST;
1423         }
1424
1425         /*
1426          * folio is locked, and the swapcache is now secured against
1427          * concurrent swapping to and from the slot. Verify that the
1428          * swap entry hasn't been invalidated and recycled behind our
1429          * backs (our zswap_entry reference doesn't prevent that), to
1430          * avoid overwriting a new swap folio with old compressed data.
1431          */
1432         spin_lock(&tree->lock);
1433         if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
1434                 spin_unlock(&tree->lock);
1435                 delete_from_swap_cache(folio);
1436                 return -ENOMEM;
1437         }
1438         spin_unlock(&tree->lock);
1439
1440         __zswap_load(entry, &folio->page);
1441
1442         /* folio is up to date */
1443         folio_mark_uptodate(folio);
1444
1445         /* move it to the tail of the inactive list after end_writeback */
1446         folio_set_reclaim(folio);
1447
1448         /* start writeback */
1449         __swap_writepage(&folio->page, &wbc);
1450         folio_put(folio);
1451
1452         return 0;
1453 }
1454
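/* Return 1 and set *value if the page is filled with a single repeated word */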
1455 static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
1456 {
1457         unsigned long *page;
1458         unsigned long val;
1459         unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
1460
1461         page = (unsigned long *)ptr;
1462         val = page[0];
1463
1464         if (val != page[last_pos])
1465                 return 0;
1466
1467         for (pos = 1; pos < last_pos; pos++) {
1468                 if (val != page[pos])
1469                         return 0;
1470         }
1471
1472         *value = val;
1473
1474         return 1;
1475 }
1476
1477 static void zswap_fill_page(void *ptr, unsigned long value)
1478 {
1479         unsigned long *page;
1480
1481         page = (unsigned long *)ptr;
1482         memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
1483 }
1484
1485 bool zswap_store(struct folio *folio)
1486 {
1487         swp_entry_t swp = folio->swap;
1488         int type = swp_type(swp);
1489         pgoff_t offset = swp_offset(swp);
1490         struct page *page = &folio->page;
1491         struct zswap_tree *tree = zswap_trees[type];
1492         struct zswap_entry *entry, *dupentry;
1493         struct scatterlist input, output;
1494         struct crypto_acomp_ctx *acomp_ctx;
1495         struct obj_cgroup *objcg = NULL;
1496         struct mem_cgroup *memcg = NULL;
1497         struct zswap_pool *pool;
1498         struct zpool *zpool;
1499         unsigned int dlen = PAGE_SIZE;
1500         unsigned long handle, value;
1501         char *buf;
1502         u8 *src, *dst;
1503         gfp_t gfp;
1504         int ret;
1505
1506         VM_WARN_ON_ONCE(!folio_test_locked(folio));
1507         VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
1508
1509         /* Large folios aren't supported */
1510         if (folio_test_large(folio))
1511                 return false;
1512
1513         if (!zswap_enabled || !tree)
1514                 return false;
1515
1516         /*
1517          * If this is a duplicate, it must be removed before attempting to store
1518          * it; otherwise, if the store fails, the old page won't be removed from
1519          * the tree and it might be written back, overwriting the new data.
1520          */
1521         spin_lock(&tree->lock);
1522         dupentry = zswap_rb_search(&tree->rbroot, offset);
1523         if (dupentry) {
1524                 zswap_duplicate_entry++;
1525                 zswap_invalidate_entry(tree, dupentry);
1526         }
1527         spin_unlock(&tree->lock);
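        /*
         * Check the memcg's zswap limit: if the folio's cgroup is over it, try
         * to write back one of its entries before giving up on this store.
         */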
1528         objcg = get_obj_cgroup_from_folio(folio);
1529         if (objcg && !obj_cgroup_may_zswap(objcg)) {
1530                 memcg = get_mem_cgroup_from_objcg(objcg);
1531                 if (shrink_memcg(memcg)) {
1532                         mem_cgroup_put(memcg);
1533                         goto reject;
1534                 }
1535                 mem_cgroup_put(memcg);
1536         }
1537
1538         /* reclaim space if needed */
1539         if (zswap_is_full()) {
1540                 zswap_pool_limit_hit++;
1541                 zswap_pool_reached_full = true;
1542                 goto shrink;
1543         }
1544
1545         if (zswap_pool_reached_full) {
1546                 if (!zswap_can_accept())
1547                         goto shrink;
1548                 else
1549                         zswap_pool_reached_full = false;
1550         }
1551
1552         /* allocate entry */
1553         entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
1554         if (!entry) {
1555                 zswap_reject_kmemcache_fail++;
1556                 goto reject;
1557         }
1558
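        /*
         * A page consisting of a single repeating word is stored as just that
         * word (entry->length == 0), skipping compression entirely.
         */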
1559         if (zswap_same_filled_pages_enabled) {
1560                 src = kmap_local_page(page);
1561                 if (zswap_is_page_same_filled(src, &value)) {
1562                         kunmap_local(src);
1563                         entry->swpentry = swp_entry(type, offset);
1564                         entry->length = 0;
1565                         entry->value = value;
1566                         atomic_inc(&zswap_same_filled_pages);
1567                         goto insert_entry;
1568                 }
1569                 kunmap_local(src);
1570         }
1571
1572         if (!zswap_non_same_filled_pages_enabled)
1573                 goto freepage;
1574
1575         /* if entry is successfully added, it keeps the reference */
1576         entry->pool = zswap_pool_current_get();
1577         if (!entry->pool)
1578                 goto freepage;
1579
1580         if (objcg) {
1581                 memcg = get_mem_cgroup_from_objcg(objcg);
1582                 if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
1583                         mem_cgroup_put(memcg);
1584                         goto put_pool;
1585                 }
1586                 mem_cgroup_put(memcg);
1587         }
1588
1589         /* compress */
1590         acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1591
1592         mutex_lock(&acomp_ctx->mutex);
1593
1594         dst = acomp_ctx->buffer;
1595         sg_init_table(&input, 1);
1596         sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
1597
1598         /*
1599          * We need PAGE_SIZE * 2 here because over-compression can occur, and
1600          * hardware accelerators may not check the dst buffer size, so give
1601          * the dst buffer enough room to avoid a buffer overflow.
1602          */
1603         sg_init_one(&output, dst, PAGE_SIZE * 2);
1604         acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1605         /*
1606          * It may look odd that we send an asynchronous request and then wait
1607          * for its completion synchronously; the operation is effectively
1608          * synchronous.
1609          * In theory, acomp lets users submit multiple requests on one acomp
1610          * instance and have them completed concurrently. But zswap stores and
1611          * loads page by page, so within a single thread there is no way to
1612          * send a second page before the first one is done.
1613          * However, different threads running on different CPUs use different
1614          * acomp instances, so multiple threads can still do (de)compression
1615          * in parallel.
1616          */
1617         ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1618         dlen = acomp_ctx->req->dlen;
1619
1620         if (ret) {
1621                 zswap_reject_compress_fail++;
1622                 goto put_dstmem;
1623         }
1624
1625         /* store */
1626         zpool = zswap_find_zpool(entry);
1627         gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
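        /* only use highmem/movable pages if the backend can migrate its objects */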
1628         if (zpool_malloc_support_movable(zpool))
1629                 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
1630         ret = zpool_malloc(zpool, dlen, gfp, &handle);
1631         if (ret == -ENOSPC) {
1632                 zswap_reject_compress_poor++;
1633                 goto put_dstmem;
1634         }
1635         if (ret) {
1636                 zswap_reject_alloc_fail++;
1637                 goto put_dstmem;
1638         }
1639         buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
1640         memcpy(buf, dst, dlen);
1641         zpool_unmap_handle(zpool, handle);
1642         mutex_unlock(&acomp_ctx->mutex);
1643
1644         /* populate entry */
1645         entry->swpentry = swp_entry(type, offset);
1646         entry->handle = handle;
1647         entry->length = dlen;
1648
1649 insert_entry:
1650         entry->objcg = objcg;
1651         if (objcg) {
1652                 obj_cgroup_charge_zswap(objcg, entry->length);
1653                 /* Account before objcg ref is moved to tree */
1654                 count_objcg_event(objcg, ZSWPOUT);
1655         }
1656
1657         /* map */
1658         spin_lock(&tree->lock);
1659         /*
1660          * A duplicate entry should have been removed at the beginning of this
1661          * function. Since the swap entry should be pinned, if a duplicate is
1662          * found again here it means that something went wrong in the swap
1663          * cache.
1664          */
1665         while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
1666                 WARN_ON(1);
1667                 zswap_duplicate_entry++;
1668                 zswap_invalidate_entry(tree, dupentry);
1669         }
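        /*
         * Only compressed entries go on the LRU; same-filled entries
         * (length == 0) have nothing to write back.
         */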
1670         if (entry->length) {
1671                 INIT_LIST_HEAD(&entry->lru);
1672                 zswap_lru_add(&entry->pool->list_lru, entry);
1673                 atomic_inc(&entry->pool->nr_stored);
1674         }
1675         spin_unlock(&tree->lock);
1676
1677         /* update stats */
1678         atomic_inc(&zswap_stored_pages);
1679         zswap_update_total_size();
1680         count_vm_event(ZSWPOUT);
1681
1682         return true;
1683
1684 put_dstmem:
1685         mutex_unlock(&acomp_ctx->mutex);
1686 put_pool:
1687         zswap_pool_put(entry->pool);
1688 freepage:
1689         zswap_entry_cache_free(entry);
1690 reject:
1691         if (objcg)
1692                 obj_cgroup_put(objcg);
1693         return false;
1694
1695 shrink:
1696         pool = zswap_pool_last_get();
1697         if (pool && !queue_work(shrink_wq, &pool->shrink_work))
1698                 zswap_pool_put(pool);
1699         goto reject;
1700 }
1701
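/*
 * Called on the swap-in path. If the swapped-out page is present in zswap,
 * decompress (or refill) it into @folio and return true so the caller can
 * skip reading from the swap device.
 */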
1702 bool zswap_load(struct folio *folio)
1703 {
1704         swp_entry_t swp = folio->swap;
1705         int type = swp_type(swp);
1706         pgoff_t offset = swp_offset(swp);
1707         struct page *page = &folio->page;
1708         struct zswap_tree *tree = zswap_trees[type];
1709         struct zswap_entry *entry;
1710         u8 *dst;
1711
1712         VM_WARN_ON_ONCE(!folio_test_locked(folio));
1713
1714         /* find */
1715         spin_lock(&tree->lock);
1716         entry = zswap_entry_find_get(&tree->rbroot, offset);
1717         if (!entry) {
1718                 spin_unlock(&tree->lock);
1719                 return false;
1720         }
1721         spin_unlock(&tree->lock);
1722
1723         if (entry->length)
1724                 __zswap_load(entry, page);
1725         else {
1726                 dst = kmap_local_page(page);
1727                 zswap_fill_page(dst, entry->value);
1728                 kunmap_local(dst);
1729         }
1730
1731         count_vm_event(ZSWPIN);
1732         if (entry->objcg)
1733                 count_objcg_event(entry->objcg, ZSWPIN);
1734
1735         spin_lock(&tree->lock);
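        /*
         * With exclusive loads, the zswap copy is dropped once it has been
         * loaded, so mark the folio dirty: if it is reclaimed again, it must
         * be rewritten to zswap or the swap device.
         */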
1736         if (zswap_exclusive_loads_enabled) {
1737                 zswap_invalidate_entry(tree, entry);
1738                 folio_mark_dirty(folio);
1739         } else if (entry->length) {
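                /*
                 * The entry was just used, so it is hot again; move it away
                 * from the writeback end of the LRU.
                 */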
1740                 zswap_lru_del(&entry->pool->list_lru, entry);
1741                 zswap_lru_add(&entry->pool->list_lru, entry);
1742         }
1743         zswap_entry_put(tree, entry);
1744         spin_unlock(&tree->lock);
1745
1746         return true;
1747 }
1748
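/* Called when a swap slot is freed: drop the zswap copy for @offset, if any. */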
1749 void zswap_invalidate(int type, pgoff_t offset)
1750 {
1751         struct zswap_tree *tree = zswap_trees[type];
1752         struct zswap_entry *entry;
1753
1754         /* find */
1755         spin_lock(&tree->lock);
1756         entry = zswap_rb_search(&tree->rbroot, offset);
1757         if (!entry) {
1758                 /* entry was written back */
1759                 spin_unlock(&tree->lock);
1760                 return;
1761         }
1762         zswap_invalidate_entry(tree, entry);
1763         spin_unlock(&tree->lock);
1764 }
1765
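/* Called when a swap device is enabled: allocate the per-type tree. */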
1766 void zswap_swapon(int type)
1767 {
1768         struct zswap_tree *tree;
1769
1770         tree = kzalloc(sizeof(*tree), GFP_KERNEL);
1771         if (!tree) {
1772                 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1773                 return;
1774         }
1775
1776         tree->rbroot = RB_ROOT;
1777         spin_lock_init(&tree->lock);
1778         zswap_trees[type] = tree;
1779 }
1780
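/*
 * Called after swapoff has freed all swap entries of this type: release any
 * remaining zswap entries along with the tree itself.
 */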
1781 void zswap_swapoff(int type)
1782 {
1783         struct zswap_tree *tree = zswap_trees[type];
1784         struct zswap_entry *entry, *n;
1785
1786         if (!tree)
1787                 return;
1788
1789         /* walk the tree and free everything */
1790         spin_lock(&tree->lock);
1791         rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
1792                 zswap_free_entry(entry);
1793         tree->rbroot = RB_ROOT;
1794         spin_unlock(&tree->lock);
1795         kfree(tree);
1796         zswap_trees[type] = NULL;
1797 }
1798
1799 /*********************************
1800 * debugfs functions
1801 **********************************/
1802 #ifdef CONFIG_DEBUG_FS
1803 #include <linux/debugfs.h>
1804
1805 static struct dentry *zswap_debugfs_root;
1806
1807 static int zswap_debugfs_init(void)
1808 {
1809         if (!debugfs_initialized())
1810                 return -ENODEV;
1811
1812         zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
1813
1814         debugfs_create_u64("pool_limit_hit", 0444,
1815                            zswap_debugfs_root, &zswap_pool_limit_hit);
1816         debugfs_create_u64("reject_reclaim_fail", 0444,
1817                            zswap_debugfs_root, &zswap_reject_reclaim_fail);
1818         debugfs_create_u64("reject_alloc_fail", 0444,
1819                            zswap_debugfs_root, &zswap_reject_alloc_fail);
1820         debugfs_create_u64("reject_kmemcache_fail", 0444,
1821                            zswap_debugfs_root, &zswap_reject_kmemcache_fail);
1822         debugfs_create_u64("reject_compress_fail", 0444,
1823                            zswap_debugfs_root, &zswap_reject_compress_fail);
1824         debugfs_create_u64("reject_compress_poor", 0444,
1825                            zswap_debugfs_root, &zswap_reject_compress_poor);
1826         debugfs_create_u64("written_back_pages", 0444,
1827                            zswap_debugfs_root, &zswap_written_back_pages);
1828         debugfs_create_u64("duplicate_entry", 0444,
1829                            zswap_debugfs_root, &zswap_duplicate_entry);
1830         debugfs_create_u64("pool_total_size", 0444,
1831                            zswap_debugfs_root, &zswap_pool_total_size);
1832         debugfs_create_atomic_t("stored_pages", 0444,
1833                                 zswap_debugfs_root, &zswap_stored_pages);
1834         debugfs_create_atomic_t("same_filled_pages", 0444,
1835                                 zswap_debugfs_root, &zswap_same_filled_pages);
1836
1837         return 0;
1838 }
1839 #else
1840 static int zswap_debugfs_init(void)
1841 {
1842         return 0;
1843 }
1844 #endif
1845
1846 /*********************************
1847 * module init and exit
1848 **********************************/
1849 static int zswap_setup(void)
1850 {
1851         struct zswap_pool *pool;
1852         int ret;
1853
1854         zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1855         if (!zswap_entry_cache) {
1856                 pr_err("entry cache creation failed\n");
1857                 goto cache_fail;
1858         }
1859
1860         ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1861                                       "mm/zswap_pool:prepare",
1862                                       zswap_cpu_comp_prepare,
1863                                       zswap_cpu_comp_dead);
1864         if (ret)
1865                 goto hp_fail;
1866
1867         pool = __zswap_pool_create_fallback();
1868         if (pool) {
1869                 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1870                         zpool_get_type(pool->zpools[0]));
1871                 list_add(&pool->list, &zswap_pools);
1872                 zswap_has_pool = true;
1873         } else {
1874                 pr_err("pool creation failed\n");
1875                 zswap_enabled = false;
1876         }
1877
1878         shrink_wq = create_workqueue("zswap-shrink");
1879         if (!shrink_wq)
1880                 goto fallback_fail;
1881
1882         if (zswap_debugfs_init())
1883                 pr_warn("debugfs initialization failed\n");
1884         zswap_init_state = ZSWAP_INIT_SUCCEED;
1885         return 0;
1886
1887 fallback_fail:
1888         if (pool)
1889                 zswap_pool_destroy(pool);
1890 hp_fail:
1891         kmem_cache_destroy(zswap_entry_cache);
1892 cache_fail:
1893         /* if built-in, we aren't unloaded on failure; don't allow use */
1894         zswap_init_state = ZSWAP_INIT_FAILED;
1895         zswap_enabled = false;
1896         return -ENOMEM;
1897 }
1898
1899 static int __init zswap_init(void)
1900 {
1901         if (!zswap_enabled)
1902                 return 0;
1903         return zswap_setup();
1904 }
1905 /* must be late so crypto has time to come up */
1906 late_initcall(zswap_init);
1907
1908 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
1909 MODULE_DESCRIPTION("Compressed cache for swap pages");