1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * zswap.c - zswap driver file
4  *
5  * zswap is a cache that takes pages that are in the process
6  * of being swapped out and attempts to compress and store them in a
7  * RAM-based memory pool.  This can result in a significant I/O reduction on
8  * the swap device and, in the case where decompressing from RAM is faster
9  * than reading from the swap device, can also improve workload performance.
10  *
11  * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
12 */
13
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16 #include <linux/module.h>
17 #include <linux/cpu.h>
18 #include <linux/highmem.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/types.h>
22 #include <linux/atomic.h>
23 #include <linux/swap.h>
24 #include <linux/crypto.h>
25 #include <linux/scatterlist.h>
26 #include <linux/mempolicy.h>
27 #include <linux/mempool.h>
28 #include <linux/zpool.h>
29 #include <crypto/acompress.h>
30 #include <linux/zswap.h>
31 #include <linux/mm_types.h>
32 #include <linux/page-flags.h>
33 #include <linux/swapops.h>
34 #include <linux/writeback.h>
35 #include <linux/pagemap.h>
36 #include <linux/workqueue.h>
37 #include <linux/list_lru.h>
38
39 #include "swap.h"
40 #include "internal.h"
41
42 /*********************************
43 * statistics
44 **********************************/
45 /* The number of compressed pages currently stored in zswap */
46 atomic_t zswap_stored_pages = ATOMIC_INIT(0);
47 /* The number of same-value filled pages currently stored in zswap */
48 static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
49
50 /*
51  * The statistics below are not protected from concurrent access for
52  * performance reasons, so they may not be 100% accurate.  However,
53  * they do provide useful information on roughly how many times a
54  * certain event is occurring.
55 */
56
57 /* Pool limit was hit (see zswap_max_pool_percent) */
58 static u64 zswap_pool_limit_hit;
59 /* Pages written back when pool limit was reached */
60 static u64 zswap_written_back_pages;
61 /* Store failed due to a reclaim failure after pool limit was reached */
62 static u64 zswap_reject_reclaim_fail;
63 /* Store failed due to compression algorithm failure */
64 static u64 zswap_reject_compress_fail;
65 /* Compressed page was too big for the allocator to (optimally) store */
66 static u64 zswap_reject_compress_poor;
67 /* Store failed because underlying allocator could not get memory */
68 static u64 zswap_reject_alloc_fail;
69 /* Store failed because the entry metadata could not be allocated (rare) */
70 static u64 zswap_reject_kmemcache_fail;
71
72 /* Shrinker work queue */
73 static struct workqueue_struct *shrink_wq;
74 /* Pool limit was hit, we need to calm down */
75 static bool zswap_pool_reached_full;
76
77 /*********************************
78 * tunables
79 **********************************/
80
81 #define ZSWAP_PARAM_UNSET ""
82
83 static int zswap_setup(void);
84
85 /* Enable/disable zswap */
86 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
87 static int zswap_enabled_param_set(const char *,
88                                    const struct kernel_param *);
89 static const struct kernel_param_ops zswap_enabled_param_ops = {
90         .set =          zswap_enabled_param_set,
91         .get =          param_get_bool,
92 };
93 module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
94
95 /* Crypto compressor to use */
96 static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
97 static int zswap_compressor_param_set(const char *,
98                                       const struct kernel_param *);
99 static const struct kernel_param_ops zswap_compressor_param_ops = {
100         .set =          zswap_compressor_param_set,
101         .get =          param_get_charp,
102         .free =         param_free_charp,
103 };
104 module_param_cb(compressor, &zswap_compressor_param_ops,
105                 &zswap_compressor, 0644);
106
107 /* Compressed storage zpool to use */
108 static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
109 static int zswap_zpool_param_set(const char *, const struct kernel_param *);
110 static const struct kernel_param_ops zswap_zpool_param_ops = {
111         .set =          zswap_zpool_param_set,
112         .get =          param_get_charp,
113         .free =         param_free_charp,
114 };
115 module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);
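/*
 * All three parameters above can also be changed at runtime via sysfs;
 * a minimal usage sketch from userspace (values are illustrative and
 * must name a compressor and zpool backend built into this kernel):
 *
 *	echo Y        > /sys/module/zswap/parameters/enabled
 *	echo lz4      > /sys/module/zswap/parameters/compressor
 *	echo zsmalloc > /sys/module/zswap/parameters/zpool
 */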
116
117 /* The maximum percentage of memory that the compressed pool can occupy */
118 static unsigned int zswap_max_pool_percent = 20;
119 module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);
120
121 /* The threshold for accepting new pages after the max_pool_percent was hit */
122 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
123 module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
124                    uint, 0644);
125
126 /* Number of zpools in zswap_pool (empirically determined for scalability) */
127 #define ZSWAP_NR_ZPOOLS 32
128
129 /* Enable/disable memory pressure-based shrinker. */
130 static bool zswap_shrinker_enabled = IS_ENABLED(
131                 CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
132 module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
133
134 bool is_zswap_enabled(void)
135 {
136         return zswap_enabled;
137 }
138
139 /*********************************
140 * data structures
141 **********************************/
142
143 struct crypto_acomp_ctx {
144         struct crypto_acomp *acomp;
145         struct acomp_req *req;
146         struct crypto_wait wait;
147         u8 *buffer;
148         struct mutex mutex;
149         bool is_sleepable;
150 };
151
152 /*
153  * The lock ordering is the zswap tree (xarray) lock -> the LRU list lock.
154  * The only case where the lru lock is not acquired while holding the tree
155  * lock is when a zswap_entry is taken off the lru for writeback; in that
156  * case it needs to be verified that it's still valid in the tree.
157  */
158 struct zswap_pool {
159         struct zpool *zpools[ZSWAP_NR_ZPOOLS];
160         struct crypto_acomp_ctx __percpu *acomp_ctx;
161         struct percpu_ref ref;
162         struct list_head list;
163         struct work_struct release_work;
164         struct hlist_node node;
165         char tfm_name[CRYPTO_MAX_ALG_NAME];
166 };
167
168 /* Global LRU lists shared by all zswap pools. */
169 static struct list_lru zswap_list_lru;
170
171 /* The lock protects zswap_next_shrink updates. */
172 static DEFINE_SPINLOCK(zswap_shrink_lock);
173 static struct mem_cgroup *zswap_next_shrink;
174 static struct work_struct zswap_shrink_work;
175 static struct shrinker *zswap_shrinker;
176
177 /*
178  * struct zswap_entry
179  *
180  * This structure contains the metadata for tracking a single compressed
181  * page within zswap.
182  *
183  * swpentry - associated swap entry, the offset indexes into the xarray tree
184  * length - the length in bytes of the compressed page data.  Needed during
185  *          decompression. For a same-value filled page, length is 0, and both
186  *          pool and lru are invalid and must be ignored.
187  * pool - the zswap_pool the entry's data is in
188  * handle - zpool allocation handle that stores the compressed page data
189  * value - the value (word) that fills a same-value filled page
190  * objcg - the obj_cgroup that the compressed memory is charged to
191  * lru - handle to the pool's lru used to evict pages.
192  */
193 struct zswap_entry {
194         swp_entry_t swpentry;
195         unsigned int length;
196         struct zswap_pool *pool;
197         union {
198                 unsigned long handle;
199                 unsigned long value;
200         };
201         struct obj_cgroup *objcg;
202         struct list_head lru;
203 };
204
205 static struct xarray *zswap_trees[MAX_SWAPFILES];
206 static unsigned int nr_zswap_trees[MAX_SWAPFILES];
207
208 /* RCU-protected iteration */
209 static LIST_HEAD(zswap_pools);
210 /* protects zswap_pools list modification */
211 static DEFINE_SPINLOCK(zswap_pools_lock);
212 /* pool counter to provide unique names to zpool */
213 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
214
215 enum zswap_init_type {
216         ZSWAP_UNINIT,
217         ZSWAP_INIT_SUCCEED,
218         ZSWAP_INIT_FAILED
219 };
220
221 static enum zswap_init_type zswap_init_state;
222
223 /* used to ensure the integrity of initialization */
224 static DEFINE_MUTEX(zswap_init_lock);
225
226 /* init completed, but couldn't create the initial pool */
227 static bool zswap_has_pool;
228
229 /*********************************
230 * helpers and fwd declarations
231 **********************************/
232
233 static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
234 {
235         return &zswap_trees[swp_type(swp)][swp_offset(swp)
236                 >> SWAP_ADDRESS_SPACE_SHIFT];
237 }
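/*
 * Illustrative example of the mapping above: with SWAP_ADDRESS_SPACE_SHIFT
 * defined as 14 in linux/swap.h, a swap entry of type 1 and offset 0x12345
 * selects zswap_trees[1][0x12345 >> 14], i.e. tree 4; callers then index
 * that xarray with the full offset (see the xa_store()/xa_erase() calls
 * below).
 */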
238
239 #define zswap_pool_debug(msg, p)                                \
240         pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
241                  zpool_get_type((p)->zpools[0]))
242
243 /*********************************
244 * pool functions
245 **********************************/
246 static void __zswap_pool_empty(struct percpu_ref *ref);
247
248 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
249 {
250         int i;
251         struct zswap_pool *pool;
252         char name[38]; /* 'zswap' + 32 char (max) num + \0 */
253         gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
254         int ret;
255
256         if (!zswap_has_pool) {
257                 /* if either is unset, pool initialization failed, and we
258                  * need both params to be set correctly before trying to
259                  * create a pool.
260                  */
261                 if (!strcmp(type, ZSWAP_PARAM_UNSET))
262                         return NULL;
263                 if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
264                         return NULL;
265         }
266
267         pool = kzalloc(sizeof(*pool), GFP_KERNEL);
268         if (!pool)
269                 return NULL;
270
271         for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
272                 /* unique name for each pool specifically required by zsmalloc */
273                 snprintf(name, 38, "zswap%x",
274                          atomic_inc_return(&zswap_pools_count));
275
276                 pool->zpools[i] = zpool_create_pool(type, name, gfp);
277                 if (!pool->zpools[i]) {
278                         pr_err("%s zpool not available\n", type);
279                         goto error;
280                 }
281         }
282         pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
283
284         strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
285
286         pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
287         if (!pool->acomp_ctx) {
288                 pr_err("percpu alloc failed\n");
289                 goto error;
290         }
291
292         ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
293                                        &pool->node);
294         if (ret)
295                 goto error;
296
297         /* being the current pool takes 1 ref; this func expects the
298          * caller to always add the new pool as the current pool
299          */
300         ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
301                               PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
302         if (ret)
303                 goto ref_fail;
304         INIT_LIST_HEAD(&pool->list);
305
306         zswap_pool_debug("created", pool);
307
308         return pool;
309
310 ref_fail:
311         cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
312 error:
313         if (pool->acomp_ctx)
314                 free_percpu(pool->acomp_ctx);
315         while (i--)
316                 zpool_destroy_pool(pool->zpools[i]);
317         kfree(pool);
318         return NULL;
319 }
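/*
 * Illustrative note on the naming above: the counter is global and
 * printed with "%x", so the 32 zpools of the first pool created are
 * named "zswap1" through "zswap20" (hex for 32), which keeps the names
 * unique as zsmalloc requires.
 */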
320
321 static struct zswap_pool *__zswap_pool_create_fallback(void)
322 {
323         bool has_comp, has_zpool;
324
325         has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
326         if (!has_comp && strcmp(zswap_compressor,
327                                 CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
328                 pr_err("compressor %s not available, using default %s\n",
329                        zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
330                 param_free_charp(&zswap_compressor);
331                 zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
332                 has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
333         }
334         if (!has_comp) {
335                 pr_err("default compressor %s not available\n",
336                        zswap_compressor);
337                 param_free_charp(&zswap_compressor);
338                 zswap_compressor = ZSWAP_PARAM_UNSET;
339         }
340
341         has_zpool = zpool_has_pool(zswap_zpool_type);
342         if (!has_zpool && strcmp(zswap_zpool_type,
343                                  CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
344                 pr_err("zpool %s not available, using default %s\n",
345                        zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
346                 param_free_charp(&zswap_zpool_type);
347                 zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
348                 has_zpool = zpool_has_pool(zswap_zpool_type);
349         }
350         if (!has_zpool) {
351                 pr_err("default zpool %s not available\n",
352                        zswap_zpool_type);
353                 param_free_charp(&zswap_zpool_type);
354                 zswap_zpool_type = ZSWAP_PARAM_UNSET;
355         }
356
357         if (!has_comp || !has_zpool)
358                 return NULL;
359
360         return zswap_pool_create(zswap_zpool_type, zswap_compressor);
361 }
362
363 static void zswap_pool_destroy(struct zswap_pool *pool)
364 {
365         int i;
366
367         zswap_pool_debug("destroying", pool);
368
369         cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
370         free_percpu(pool->acomp_ctx);
371
372         for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
373                 zpool_destroy_pool(pool->zpools[i]);
374         kfree(pool);
375 }
376
377 static void __zswap_pool_release(struct work_struct *work)
378 {
379         struct zswap_pool *pool = container_of(work, typeof(*pool),
380                                                 release_work);
381
382         synchronize_rcu();
383
384         /* nobody should have been able to get a ref... */
385         WARN_ON(!percpu_ref_is_zero(&pool->ref));
386         percpu_ref_exit(&pool->ref);
387
388         /* pool is now off zswap_pools list and has no references. */
389         zswap_pool_destroy(pool);
390 }
391
392 static struct zswap_pool *zswap_pool_current(void);
393
394 static void __zswap_pool_empty(struct percpu_ref *ref)
395 {
396         struct zswap_pool *pool;
397
398         pool = container_of(ref, typeof(*pool), ref);
399
400         spin_lock_bh(&zswap_pools_lock);
401
402         WARN_ON(pool == zswap_pool_current());
403
404         list_del_rcu(&pool->list);
405
406         INIT_WORK(&pool->release_work, __zswap_pool_release);
407         schedule_work(&pool->release_work);
408
409         spin_unlock_bh(&zswap_pools_lock);
410 }
411
412 static int __must_check zswap_pool_get(struct zswap_pool *pool)
413 {
414         if (!pool)
415                 return 0;
416
417         return percpu_ref_tryget(&pool->ref);
418 }
419
420 static void zswap_pool_put(struct zswap_pool *pool)
421 {
422         percpu_ref_put(&pool->ref);
423 }
424
425 static struct zswap_pool *__zswap_pool_current(void)
426 {
427         struct zswap_pool *pool;
428
429         pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
430         WARN_ONCE(!pool && zswap_has_pool,
431                   "%s: no page storage pool!\n", __func__);
432
433         return pool;
434 }
435
436 static struct zswap_pool *zswap_pool_current(void)
437 {
438         assert_spin_locked(&zswap_pools_lock);
439
440         return __zswap_pool_current();
441 }
442
443 static struct zswap_pool *zswap_pool_current_get(void)
444 {
445         struct zswap_pool *pool;
446
447         rcu_read_lock();
448
449         pool = __zswap_pool_current();
450         if (!zswap_pool_get(pool))
451                 pool = NULL;
452
453         rcu_read_unlock();
454
455         return pool;
456 }
457
458 /* type and compressor must be null-terminated */
459 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
460 {
461         struct zswap_pool *pool;
462
463         assert_spin_locked(&zswap_pools_lock);
464
465         list_for_each_entry_rcu(pool, &zswap_pools, list) {
466                 if (strcmp(pool->tfm_name, compressor))
467                         continue;
468                 /* all zpools share the same type */
469                 if (strcmp(zpool_get_type(pool->zpools[0]), type))
470                         continue;
471                 /* if we can't get it, it's about to be destroyed */
472                 if (!zswap_pool_get(pool))
473                         continue;
474                 return pool;
475         }
476
477         return NULL;
478 }
479
480 static unsigned long zswap_max_pages(void)
481 {
482         return totalram_pages() * zswap_max_pool_percent / 100;
483 }
484
485 static unsigned long zswap_accept_thr_pages(void)
486 {
487         return zswap_max_pages() * zswap_accept_thr_percent / 100;
488 }
489
490 unsigned long zswap_total_pages(void)
491 {
492         struct zswap_pool *pool;
493         unsigned long total = 0;
494
495         rcu_read_lock();
496         list_for_each_entry_rcu(pool, &zswap_pools, list) {
497                 int i;
498
499                 for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
500                         total += zpool_get_total_pages(pool->zpools[i]);
501         }
502         rcu_read_unlock();
503
504         return total;
505 }
506
507 static bool zswap_check_limits(void)
508 {
509         unsigned long cur_pages = zswap_total_pages();
510         unsigned long max_pages = zswap_max_pages();
511
512         if (cur_pages >= max_pages) {
513                 zswap_pool_limit_hit++;
514                 zswap_pool_reached_full = true;
515         } else if (zswap_pool_reached_full &&
516                    cur_pages <= zswap_accept_thr_pages()) {
517                         zswap_pool_reached_full = false;
518         }
519         return zswap_pool_reached_full;
520 }
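/*
 * Worked example with illustrative numbers: on a host with 4 GiB of RAM
 * (1048576 4K pages) and the default tunables above, zswap_max_pages()
 * is 1048576 * 20 / 100 = 209715 pages and zswap_accept_thr_pages() is
 * 209715 * 90 / 100 = 188743 pages. Once usage reaches the former,
 * stores are rejected until the pool has shrunk back below the latter.
 */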
521
522 /*********************************
523 * param callbacks
524 **********************************/
525
526 static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
527 {
528         /* no change required */
529         if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
530                 return false;
531         return true;
532 }
533
534 /* val must be a null-terminated string */
535 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
536                              char *type, char *compressor)
537 {
538         struct zswap_pool *pool, *put_pool = NULL;
539         char *s = strstrip((char *)val);
540         int ret = 0;
541         bool new_pool = false;
542
543         mutex_lock(&zswap_init_lock);
544         switch (zswap_init_state) {
545         case ZSWAP_UNINIT:
546                 /* if this is load-time (pre-init) param setting,
547                  * don't create a pool; that's done during init.
548                  */
549                 ret = param_set_charp(s, kp);
550                 break;
551         case ZSWAP_INIT_SUCCEED:
552                 new_pool = zswap_pool_changed(s, kp);
553                 break;
554         case ZSWAP_INIT_FAILED:
555                 pr_err("can't set param, initialization failed\n");
556                 ret = -ENODEV;
557         }
558         mutex_unlock(&zswap_init_lock);
559
560         /* no need to create a new pool, return directly */
561         if (!new_pool)
562                 return ret;
563
564         if (!type) {
565                 if (!zpool_has_pool(s)) {
566                         pr_err("zpool %s not available\n", s);
567                         return -ENOENT;
568                 }
569                 type = s;
570         } else if (!compressor) {
571                 if (!crypto_has_acomp(s, 0, 0)) {
572                         pr_err("compressor %s not available\n", s);
573                         return -ENOENT;
574                 }
575                 compressor = s;
576         } else {
577                 WARN_ON(1);
578                 return -EINVAL;
579         }
580
581         spin_lock_bh(&zswap_pools_lock);
582
583         pool = zswap_pool_find_get(type, compressor);
584         if (pool) {
585                 zswap_pool_debug("using existing", pool);
586                 WARN_ON(pool == zswap_pool_current());
587                 list_del_rcu(&pool->list);
588         }
589
590         spin_unlock_bh(&zswap_pools_lock);
591
592         if (!pool)
593                 pool = zswap_pool_create(type, compressor);
594         else {
595                 /*
596                  * Restore the initial ref dropped by percpu_ref_kill()
597                  * when the pool was decommissioned and switch it again
598                  * to percpu mode.
599                  */
600                 percpu_ref_resurrect(&pool->ref);
601
602                 /* Drop the ref from zswap_pool_find_get(). */
603                 zswap_pool_put(pool);
604         }
605
606         if (pool)
607                 ret = param_set_charp(s, kp);
608         else
609                 ret = -EINVAL;
610
611         spin_lock_bh(&zswap_pools_lock);
612
613         if (!ret) {
614                 put_pool = zswap_pool_current();
615                 list_add_rcu(&pool->list, &zswap_pools);
616                 zswap_has_pool = true;
617         } else if (pool) {
618                 /* add the possibly pre-existing pool to the end of the pools
619                  * list; if it's new (and empty) then it'll be removed and
620                  * destroyed by the put after we drop the lock
621                  */
622                 list_add_tail_rcu(&pool->list, &zswap_pools);
623                 put_pool = pool;
624         }
625
626         spin_unlock_bh(&zswap_pools_lock);
627
628         if (!zswap_has_pool && !pool) {
629                 /* if initial pool creation failed, and this pool creation also
630                  * failed, maybe both compressor and zpool params were bad.
631                  * Allow changing this param, so pool creation will succeed
632                  * when the other param is changed. We already verified this
633                  * param is ok in the zpool_has_pool() or crypto_has_acomp()
634                  * checks above.
635                  */
636                 ret = param_set_charp(s, kp);
637         }
638
639         /* drop the ref from either the old current pool,
640          * or the new pool we failed to add
641          */
642         if (put_pool)
643                 percpu_ref_kill(&put_pool->ref);
644
645         return ret;
646 }
647
648 static int zswap_compressor_param_set(const char *val,
649                                       const struct kernel_param *kp)
650 {
651         return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
652 }
653
654 static int zswap_zpool_param_set(const char *val,
655                                  const struct kernel_param *kp)
656 {
657         return __zswap_param_set(val, kp, NULL, zswap_compressor);
658 }
659
660 static int zswap_enabled_param_set(const char *val,
661                                    const struct kernel_param *kp)
662 {
663         int ret = -ENODEV;
664
665         /* if this is load-time (pre-init) param setting, only set param. */
666         if (system_state != SYSTEM_RUNNING)
667                 return param_set_bool(val, kp);
668
669         mutex_lock(&zswap_init_lock);
670         switch (zswap_init_state) {
671         case ZSWAP_UNINIT:
672                 if (zswap_setup())
673                         break;
674                 fallthrough;
675         case ZSWAP_INIT_SUCCEED:
676                 if (!zswap_has_pool)
677                         pr_err("can't enable, no pool configured\n");
678                 else
679                         ret = param_set_bool(val, kp);
680                 break;
681         case ZSWAP_INIT_FAILED:
682                 pr_err("can't enable, initialization failed\n");
683         }
684         mutex_unlock(&zswap_init_lock);
685
686         return ret;
687 }
688
689 /*********************************
690 * lru functions
691 **********************************/
692
693 /* should be called under RCU */
694 #ifdef CONFIG_MEMCG
695 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
696 {
697         return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
698 }
699 #else
700 static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
701 {
702         return NULL;
703 }
704 #endif
705
706 static inline int entry_to_nid(struct zswap_entry *entry)
707 {
708         return page_to_nid(virt_to_page(entry));
709 }
710
711 static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
712 {
713         atomic_long_t *nr_zswap_protected;
714         unsigned long lru_size, old, new;
715         int nid = entry_to_nid(entry);
716         struct mem_cgroup *memcg;
717         struct lruvec *lruvec;
718
719         /*
720          * Note that it is safe to use rcu_read_lock() here, even in the face of
721          * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
722          * used in list_lru lookup, only two scenarios are possible:
723          *
724          * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
725          *    new entry will be reparented to memcg's parent's list_lru.
726          * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
727          *    new entry will be added directly to memcg's parent's list_lru.
728          *
729          * Similar reasoning holds for list_lru_del().
730          */
731         rcu_read_lock();
732         memcg = mem_cgroup_from_entry(entry);
733         /* will always succeed */
734         list_lru_add(list_lru, &entry->lru, nid, memcg);
735
736         /* Update the protection area */
737         lru_size = list_lru_count_one(list_lru, nid, memcg);
738         lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
739         nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
740         old = atomic_long_inc_return(nr_zswap_protected);
741         /*
742          * Decay to avoid overflow and adapt to changing workloads.
743          * This is based on LRU reclaim cost decaying heuristics.
744          */
745         do {
746                 new = old > lru_size / 4 ? old / 2 : old;
747         } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
748         rcu_read_unlock();
749 }
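/*
 * Sketch of the decay above, with illustrative numbers: for lru_size ==
 * 1000 the threshold is 250, so a protection counter that has grown to
 * 600 is cut to 300 by one add and to about 150 by the next, after which
 * it grows by 1 per add until it exceeds 250 again. The counter thus
 * tracks recent activity rather than an all-time total.
 */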
750
751 static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
752 {
753         int nid = entry_to_nid(entry);
754         struct mem_cgroup *memcg;
755
756         rcu_read_lock();
757         memcg = mem_cgroup_from_entry(entry);
758         /* will always succeed */
759         list_lru_del(list_lru, &entry->lru, nid, memcg);
760         rcu_read_unlock();
761 }
762
763 void zswap_lruvec_state_init(struct lruvec *lruvec)
764 {
765         atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
766 }
767
768 void zswap_folio_swapin(struct folio *folio)
769 {
770         struct lruvec *lruvec;
771
772         if (folio) {
773                 lruvec = folio_lruvec(folio);
774                 atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
775         }
776 }
777
778 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
779 {
780         /* lock out zswap shrinker walking memcg tree */
781         spin_lock(&zswap_shrink_lock);
782         if (zswap_next_shrink == memcg)
783                 zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
784         spin_unlock(&zswap_shrink_lock);
785 }
786
787 /*********************************
788 * zswap entry functions
789 **********************************/
790 static struct kmem_cache *zswap_entry_cache;
791
792 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
793 {
794         struct zswap_entry *entry;
795         entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
796         if (!entry)
797                 return NULL;
798         return entry;
799 }
800
801 static void zswap_entry_cache_free(struct zswap_entry *entry)
802 {
803         kmem_cache_free(zswap_entry_cache, entry);
804 }
805
806 static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
807 {
808         return entry->pool->zpools[hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS))];
809 }
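/*
 * Assumed-behavior sketch: hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS)) folds
 * the entry's kernel address into an index in 0..31, so entries scatter
 * across all 32 zpools and concurrent stores mostly hit different zpool
 * locks. The choice is stable over an entry's lifetime because it hashes
 * the address, not the contents.
 */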
810
811 /*
812  * Carries out the common pattern of freeing an entry's zpool allocation,
813  * freeing the entry itself, and decrementing the number of stored pages.
814  */
815 static void zswap_entry_free(struct zswap_entry *entry)
816 {
817         if (!entry->length)
818                 atomic_dec(&zswap_same_filled_pages);
819         else {
820                 zswap_lru_del(&zswap_list_lru, entry);
821                 zpool_free(zswap_find_zpool(entry), entry->handle);
822                 zswap_pool_put(entry->pool);
823         }
824         if (entry->objcg) {
825                 obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
826                 obj_cgroup_put(entry->objcg);
827         }
828         zswap_entry_cache_free(entry);
829         atomic_dec(&zswap_stored_pages);
830 }
831
832 /*********************************
833 * compressed storage functions
834 **********************************/
835 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
836 {
837         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
838         struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
839         struct crypto_acomp *acomp;
840         struct acomp_req *req;
841         int ret;
842
843         mutex_init(&acomp_ctx->mutex);
844
845         acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
846         if (!acomp_ctx->buffer)
847                 return -ENOMEM;
848
849         acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
850         if (IS_ERR(acomp)) {
851                 pr_err("could not alloc crypto acomp %s : %ld\n",
852                                 pool->tfm_name, PTR_ERR(acomp));
853                 ret = PTR_ERR(acomp);
854                 goto acomp_fail;
855         }
856         acomp_ctx->acomp = acomp;
857         acomp_ctx->is_sleepable = acomp_is_async(acomp);
858
859         req = acomp_request_alloc(acomp_ctx->acomp);
860         if (!req) {
861                 pr_err("could not alloc crypto acomp_request %s\n",
862                        pool->tfm_name);
863                 ret = -ENOMEM;
864                 goto req_fail;
865         }
866         acomp_ctx->req = req;
867
868         crypto_init_wait(&acomp_ctx->wait);
869         /*
870          * if the backend of acomp is an async driver, crypto_req_done() will
871          * wake up crypto_wait_req(); if the backend of acomp is scomp, the
872          * callback won't be called and crypto_wait_req() returns without blocking.
873          */
874         acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
875                                    crypto_req_done, &acomp_ctx->wait);
876
877         return 0;
878
879 req_fail:
880         crypto_free_acomp(acomp_ctx->acomp);
881 acomp_fail:
882         kfree(acomp_ctx->buffer);
883         return ret;
884 }
885
886 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
887 {
888         struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
889         struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
890
891         if (!IS_ERR_OR_NULL(acomp_ctx)) {
892                 if (!IS_ERR_OR_NULL(acomp_ctx->req))
893                         acomp_request_free(acomp_ctx->req);
894                 if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
895                         crypto_free_acomp(acomp_ctx->acomp);
896                 kfree(acomp_ctx->buffer);
897         }
898
899         return 0;
900 }
901
902 static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
903 {
904         struct crypto_acomp_ctx *acomp_ctx;
905         struct scatterlist input, output;
906         int comp_ret = 0, alloc_ret = 0;
907         unsigned int dlen = PAGE_SIZE;
908         unsigned long handle;
909         struct zpool *zpool;
910         char *buf;
911         gfp_t gfp;
912         u8 *dst;
913
914         acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
915
916         mutex_lock(&acomp_ctx->mutex);
917
918         dst = acomp_ctx->buffer;
919         sg_init_table(&input, 1);
920         sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
921
922         /*
923          * We need PAGE_SIZE * 2 here because over-compression can occur, and
924          * hardware accelerators may not check the dst buffer size, so give
925          * the dst buffer enough length to avoid a buffer overflow.
926          */
927         sg_init_one(&output, dst, PAGE_SIZE * 2);
928         acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
929
930         /*
931          * It may look a little silly that we send an asynchronous request and
932          * then wait for its completion synchronously, which makes the process
933          * synchronous in fact.
934          * Theoretically, acomp lets users send multiple acomp requests through
935          * one acomp instance and have them completed simultaneously. But in
936          * this case, zswap actually stores and loads page by page; there is no
937          * existing method to send the second page before the first page is done
938          * in one thread doing zswap.
939          * But different threads running on different cpus have different acomp
940          * instances, so multiple threads can do (de)compression in parallel.
941          */
942         comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
943         dlen = acomp_ctx->req->dlen;
944         if (comp_ret)
945                 goto unlock;
946
947         zpool = zswap_find_zpool(entry);
948         gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
949         if (zpool_malloc_support_movable(zpool))
950                 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
951         alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
952         if (alloc_ret)
953                 goto unlock;
954
955         buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
956         memcpy(buf, dst, dlen);
957         zpool_unmap_handle(zpool, handle);
958
959         entry->handle = handle;
960         entry->length = dlen;
961
962 unlock:
963         if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
964                 zswap_reject_compress_poor++;
965         else if (comp_ret)
966                 zswap_reject_compress_fail++;
967         else if (alloc_ret)
968                 zswap_reject_alloc_fail++;
969
970         mutex_unlock(&acomp_ctx->mutex);
971         return comp_ret == 0 && alloc_ret == 0;
972 }
973
974 static void zswap_decompress(struct zswap_entry *entry, struct page *page)
975 {
976         struct zpool *zpool = zswap_find_zpool(entry);
977         struct scatterlist input, output;
978         struct crypto_acomp_ctx *acomp_ctx;
979         u8 *src;
980
981         acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
982         mutex_lock(&acomp_ctx->mutex);
983
984         src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
985         /*
986          * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
987          * to do crypto_acomp_decompress() which might sleep. In such cases, we must
988          * resort to copying the buffer to a temporary one.
989          * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,
990          * such as a kmap address of high memory or even a vmap address.
991          * However, sg_init_one is only equipped to handle linearly mapped low memory.
992          * In such cases, we also must copy the buffer to a temporary and lowmem one.
993          */
994         if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||
995             !virt_addr_valid(src)) {
996                 memcpy(acomp_ctx->buffer, src, entry->length);
997                 src = acomp_ctx->buffer;
998                 zpool_unmap_handle(zpool, entry->handle);
999         }
1000
1001         sg_init_one(&input, src, entry->length);
1002         sg_init_table(&output, 1);
1003         sg_set_page(&output, page, PAGE_SIZE, 0);
1004         acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
1005         BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
1006         BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
1007         mutex_unlock(&acomp_ctx->mutex);
1008
1009         if (src != acomp_ctx->buffer)
1010                 zpool_unmap_handle(zpool, entry->handle);
1011 }
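/*
 * Concrete cases for the copy above (assumed backend behavior): zsmalloc
 * maps handles atomically, so a sleepable (async) compressor takes the
 * memcpy path into acomp_ctx->buffer; similarly, a highmem kmap address
 * fails virt_addr_valid() and is copied, because sg_init_one() can only
 * take a linearly mapped lowmem address.
 */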
1012
1013 /*********************************
1014 * writeback code
1015 **********************************/
1016 /*
1017  * Attempts to free an entry by adding a folio to the swap cache,
1018  * decompressing the entry data into the folio, and issuing a
1019  * bio write to write the folio back to the swap device.
1020  *
1021  * This can be thought of as a "resumed writeback" of the folio
1022  * to the swap device.  We are basically resuming the same swap
1023  * writeback path that was intercepted with the zswap_store()
1024  * in the first place.  After the folio has been decompressed into
1025  * the swap cache, the compressed version stored by zswap can be
1026  * freed.
1027  */
1028 static int zswap_writeback_entry(struct zswap_entry *entry,
1029                                  swp_entry_t swpentry)
1030 {
1031         struct xarray *tree;
1032         pgoff_t offset = swp_offset(swpentry);
1033         struct folio *folio;
1034         struct mempolicy *mpol;
1035         bool folio_was_allocated;
1036         struct writeback_control wbc = {
1037                 .sync_mode = WB_SYNC_NONE,
1038         };
1039
1040         /* try to allocate swap cache folio */
1041         mpol = get_task_policy(current);
1042         folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
1043                                 NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
1044         if (!folio)
1045                 return -ENOMEM;
1046
1047         /*
1048          * Found an existing folio, we raced with swapin or concurrent
1049          * shrinker. We generally write back cold folios from zswap, and
1050          * swapin means the folio just became hot, so skip this folio.
1051          * For unlikely concurrent shrinker case, it will be unlinked
1052          * and freed when invalidated by the concurrent shrinker anyway.
1053          */
1054         if (!folio_was_allocated) {
1055                 folio_put(folio);
1056                 return -EEXIST;
1057         }
1058
1059         /*
1060          * folio is locked, and the swapcache is now secured against
1061          * concurrent swapping to and from the slot, and concurrent
1062          * swapoff so we can safely dereference the zswap tree here.
1063          * Verify that the swap entry hasn't been invalidated and recycled
1064          * behind our backs, to avoid overwriting a new swap folio with
1065          * old compressed data. Only when this is successful can the entry
1066          * be dereferenced.
1067          */
1068         tree = swap_zswap_tree(swpentry);
1069         if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
1070                 delete_from_swap_cache(folio);
1071                 folio_unlock(folio);
1072                 folio_put(folio);
1073                 return -ENOMEM;
1074         }
1075
1076         zswap_decompress(entry, &folio->page);
1077
1078         count_vm_event(ZSWPWB);
1079         if (entry->objcg)
1080                 count_objcg_event(entry->objcg, ZSWPWB);
1081
1082         zswap_entry_free(entry);
1083
1084         /* folio is up to date */
1085         folio_mark_uptodate(folio);
1086
1087         /* move it to the tail of the inactive list after end_writeback */
1088         folio_set_reclaim(folio);
1089
1090         /* start writeback */
1091         __swap_writepage(folio, &wbc);
1092         folio_put(folio);
1093
1094         return 0;
1095 }
1096
1097 /*********************************
1098 * shrinker functions
1099 **********************************/
1100 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
1101                                        spinlock_t *lock, void *arg)
1102 {
1103         struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
1104         bool *encountered_page_in_swapcache = (bool *)arg;
1105         swp_entry_t swpentry;
1106         enum lru_status ret = LRU_REMOVED_RETRY;
1107         int writeback_result;
1108
1109         /*
1110          * As soon as we drop the LRU lock, the entry can be freed by
1111          * a concurrent invalidation. This means the following:
1112          *
1113          * 1. We extract the swp_entry_t to the stack, allowing
1114          *    zswap_writeback_entry() to pin the swap entry and
1115          *    then validate the zswap entry against that swap entry's
1116          *    tree using pointer value comparison. Only when that
1117          *    is successful can the entry be dereferenced.
1118          *
1119          * 2. Usually, objects are taken off the LRU for reclaim. In
1120          *    this case this isn't possible, because if reclaim fails
1121          *    for whatever reason, we have no means of knowing if the
1122          *    entry is alive to put it back on the LRU.
1123          *
1124          *    So rotate it before dropping the lock. If the entry is
1125          *    written back or invalidated, the free path will unlink
1126          *    it. For failures, rotation is the right thing as well.
1127          *
1128          *    Temporary failures, where the same entry should be tried
1129          *    again immediately, almost never happen for this shrinker.
1130          *    We don't do any trylocking; -ENOMEM comes closest,
1131          *    but that's extremely rare and doesn't happen spuriously
1132          *    either. Don't bother distinguishing this case.
1133          */
1134         list_move_tail(item, &l->list);
1135
1136         /*
1137          * Once the lru lock is dropped, the entry might get freed. The
1138          * swpentry is copied to the stack, and entry isn't deref'd again
1139          * until the entry is verified to still be alive in the tree.
1140          */
1141         swpentry = entry->swpentry;
1142
1143         /*
1144          * It's safe to drop the lock here because we return either
1145          * LRU_REMOVED_RETRY or LRU_RETRY.
1146          */
1147         spin_unlock(lock);
1148
1149         writeback_result = zswap_writeback_entry(entry, swpentry);
1150
1151         if (writeback_result) {
1152                 zswap_reject_reclaim_fail++;
1153                 ret = LRU_RETRY;
1154
1155                 /*
1156                  * Encountering a page already in swap cache is a sign that we are shrinking
1157                  * into the warmer region. We should terminate shrinking (if we're in the dynamic
1158                  * shrinker context).
1159                  */
1160                 if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
1161                         ret = LRU_STOP;
1162                         *encountered_page_in_swapcache = true;
1163                 }
1164         } else {
1165                 zswap_written_back_pages++;
1166         }
1167
1168         spin_lock(lock);
1169         return ret;
1170 }
1171
1172 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
1173                 struct shrink_control *sc)
1174 {
1175         struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
1176         unsigned long shrink_ret, nr_protected, lru_size;
1177         bool encountered_page_in_swapcache = false;
1178
1179         if (!zswap_shrinker_enabled ||
1180                         !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
1181                 sc->nr_scanned = 0;
1182                 return SHRINK_STOP;
1183         }
1184
1185         nr_protected =
1186                 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1187         lru_size = list_lru_shrink_count(&zswap_list_lru, sc);
1188
1189         /*
1190          * Abort if we are shrinking into the protected region.
1191          *
1192          * This short-circuiting is necessary because if we have too many
1193          * concurrent reclaimers getting the freeable zswap object counts at the
1194          * same time (before any of them made reasonable progress), the total
1195          * number of reclaimed objects might be more than the number of unprotected
1196          * objects (i.e the reclaimers will reclaim into the protected area of the
1197          * zswap LRU).
1198          */
1199         if (nr_protected >= lru_size - sc->nr_to_scan) {
1200                 sc->nr_scanned = 0;
1201                 return SHRINK_STOP;
1202         }
1203
1204         shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
1205                 &encountered_page_in_swapcache);
1206
1207         if (encountered_page_in_swapcache)
1208                 return SHRINK_STOP;
1209
1210         return shrink_ret ? shrink_ret : SHRINK_STOP;
1211 }
1212
1213 static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
1214                 struct shrink_control *sc)
1215 {
1216         struct mem_cgroup *memcg = sc->memcg;
1217         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1218         unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
1219
1220         if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1221                 return 0;
1222
1223         /*
1224          * The shrinker resumes swap writeback, which will enter block
1225          * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS
1226          * rules (may_enter_fs()), which apply on a per-folio basis.
1227          */
1228         if (!gfp_has_io_fs(sc->gfp_mask))
1229                 return 0;
1230
1231         /*
1232          * For memcg, use the cgroup-wide ZSWAP stats since we don't
1233          * have them per-node and thus per-lruvec. Careful if memcg is
1234          * runtime-disabled: we can get sc->memcg == NULL, which is ok
1235          * for the lruvec, but not for memcg_page_state().
1236          *
1237          * Without memcg, use the zswap pool-wide metrics.
1238          */
1239         if (!mem_cgroup_disabled()) {
1240                 mem_cgroup_flush_stats(memcg);
1241                 nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1242                 nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
1243         } else {
1244                 nr_backing = zswap_total_pages();
1245                 nr_stored = atomic_read(&zswap_stored_pages);
1246         }
1247
1248         if (!nr_stored)
1249                 return 0;
1250
1251         nr_protected =
1252                 atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
1253         nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
1254         /*
1255          * Subtract the lru size by an estimate of the number of pages
1256          * that should be protected.
1257          */
1258         nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
1259
1260         /*
1261          * Scale the number of freeable pages by the memory saving factor.
1262          * This ensures that the better zswap compresses memory, the fewer
1263          * pages we will evict to swap (as it will otherwise incur IO for
1264          * relatively small memory saving).
1265          *
1266          * The memory saving factor calculated here takes same-filled pages into
1267          * account, but those are not freeable since they occupy almost no
1268          * space. Hence, we may scale nr_freeable down a little bit more than we
1269          * should if we have a lot of same-filled pages.
1270          */
1271         return mult_frac(nr_freeable, nr_backing, nr_stored);
1272 }
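/*
 * Worked example of the scaling above, with illustrative numbers: for
 * nr_freeable == 1200 entries and nr_stored == 1200 pages compressed
 * into nr_backing == 400 pages (a 3:1 ratio), mult_frac(1200, 400, 1200)
 * reports 400 objects; the better zswap compresses, the less the
 * shrinker is asked to evict.
 */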
1273
1274 static struct shrinker *zswap_alloc_shrinker(void)
1275 {
1276         struct shrinker *shrinker;
1277
1278         shrinker =
1279                 shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
1280         if (!shrinker)
1281                 return NULL;
1282
1283         shrinker->scan_objects = zswap_shrinker_scan;
1284         shrinker->count_objects = zswap_shrinker_count;
1285         shrinker->batch = 0;
1286         shrinker->seeks = DEFAULT_SEEKS;
1287         return shrinker;
1288 }
1289
1290 static int shrink_memcg(struct mem_cgroup *memcg)
1291 {
1292         int nid, shrunk = 0;
1293
1294         if (!mem_cgroup_zswap_writeback_enabled(memcg))
1295                 return -EINVAL;
1296
1297         /*
1298          * Skip zombies because their LRUs are reparented and we would be
1299          * reclaiming from the parent instead of the dead memcg.
1300          */
1301         if (memcg && !mem_cgroup_online(memcg))
1302                 return -ENOENT;
1303
1304         for_each_node_state(nid, N_NORMAL_MEMORY) {
1305                 unsigned long nr_to_walk = 1;
1306
1307                 shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
1308                                             &shrink_memcg_cb, NULL, &nr_to_walk);
1309         }
1310         return shrunk ? 0 : -EAGAIN;
1311 }
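/*
 * Illustrative sketch: with nr_to_walk == 1 per node, a single call to
 * shrink_memcg() writes back at most one entry per NUMA node (at most
 * two pages on a two-node machine); shrink_worker() below keeps calling
 * it until the pool drops to the accept threshold or MAX_RECLAIM_RETRIES
 * failures accumulate.
 */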
1312
1313 static void shrink_worker(struct work_struct *w)
1314 {
1315         struct mem_cgroup *memcg;
1316         int ret, failures = 0;
1317         unsigned long thr;
1318
1319         /* Reclaim down to the accept threshold */
1320         thr = zswap_accept_thr_pages();
1321
1322         /* global reclaim will select cgroup in a round-robin fashion. */
1323         do {
1324                 spin_lock(&zswap_shrink_lock);
1325                 zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
1326                 memcg = zswap_next_shrink;
1327
1328                 /*
1329                  * We need to retry if we have gone through a full round trip, or if we
1330                  * got an offline memcg (or else we risk undoing the effect of the
1331                  * zswap memcg offlining cleanup callback). This is not catastrophic
1332                  * per se, but it will keep the now offlined memcg hostage for a while.
1333                  *
1334                  * Note that if we got an online memcg, we will keep the extra
1335                  * reference in case the original reference obtained by mem_cgroup_iter
1336                  * is dropped by the zswap memcg offlining callback, ensuring that the
1337                  * memcg is not killed when we are reclaiming.
1338                  */
1339                 if (!memcg) {
1340                         spin_unlock(&zswap_shrink_lock);
1341                         if (++failures == MAX_RECLAIM_RETRIES)
1342                                 break;
1343
1344                         goto resched;
1345                 }
1346
1347                 if (!mem_cgroup_tryget_online(memcg)) {
1348                         /* drop the reference from mem_cgroup_iter() */
1349                         mem_cgroup_iter_break(NULL, memcg);
1350                         zswap_next_shrink = NULL;
1351                         spin_unlock(&zswap_shrink_lock);
1352
1353                         if (++failures == MAX_RECLAIM_RETRIES)
1354                                 break;
1355
1356                         goto resched;
1357                 }
1358                 spin_unlock(&zswap_shrink_lock);
1359
1360                 ret = shrink_memcg(memcg);
1361                 /* drop the extra reference */
1362                 mem_cgroup_put(memcg);
1363
1364                 if (ret == -EINVAL)
1365                         break;
1366                 if (ret && ++failures == MAX_RECLAIM_RETRIES)
1367                         break;
1368 resched:
1369                 cond_resched();
1370         } while (zswap_total_pages() > thr);
1371 }
1372
1373 /*********************************
1374 * same-filled functions
1375 **********************************/
1376 static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
1377 {
1378         unsigned long *page;
1379         unsigned long val;
1380         unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
1381         bool ret = false;
1382
1383         page = kmap_local_folio(folio, 0);
1384         val = page[0];
1385
1386         if (val != page[last_pos])
1387                 goto out;
1388
1389         for (pos = 1; pos < last_pos; pos++) {
1390                 if (val != page[pos])
1391                         goto out;
1392         }
1393
1394         *value = val;
1395         ret = true;
1396 out:
1397         kunmap_local(page);
1398         return ret;
1399 }
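/*
 * Example (illustrative): a page of all zeroes, or one filled with the
 * repeating word 0x0101010101010101, passes both checks above and is
 * reported through *value for storage as a zero-length entry; a page
 * whose last word differs from its first is rejected by the early
 * comparison without scanning the middle.
 */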
1400
1401 static void zswap_fill_page(void *ptr, unsigned long value)
1402 {
1403         unsigned long *page;
1404
1405         page = (unsigned long *)ptr;
1406         memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
1407 }
1408
1409 /*********************************
1410 * main API
1411 **********************************/
bool zswap_store(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry, *old;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
	unsigned long value;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));

	/* Large folios aren't supported */
	if (folio_test_large(folio))
		return false;

	if (!zswap_enabled)
		goto check_old;

	/* Check cgroup limits */
	objcg = get_obj_cgroup_from_folio(folio);
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (shrink_memcg(memcg)) {
			mem_cgroup_put(memcg);
			goto reject;
		}
		mem_cgroup_put(memcg);
	}

	if (zswap_check_limits())
		goto reject;

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		goto reject;
	}

	if (zswap_is_folio_same_filled(folio, &value)) {
		entry->length = 0;
		entry->value = value;
		atomic_inc(&zswap_same_filled_pages);
		goto store_entry;
	}

	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
		goto freepage;

	if (objcg) {
		memcg = get_mem_cgroup_from_objcg(objcg);
		if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
			mem_cgroup_put(memcg);
			goto put_pool;
		}
		mem_cgroup_put(memcg);
	}

	if (!zswap_compress(folio, entry))
		goto put_pool;

store_entry:
	entry->swpentry = swp;
	entry->objcg = objcg;

	old = xa_store(tree, offset, entry, GFP_KERNEL);
	if (xa_is_err(old)) {
		int err = xa_err(old);

		WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
		zswap_reject_alloc_fail++;
		goto store_failed;
	}

	/*
	 * We may have had an existing entry that became stale when
	 * the folio was redirtied and now the new version is being
	 * swapped out. Get rid of the old.
	 */
	if (old)
		zswap_entry_free(old);

	if (objcg) {
		obj_cgroup_charge_zswap(objcg, entry->length);
		count_objcg_event(objcg, ZSWPOUT);
	}

	/*
	 * We finish initializing the entry while it's already in xarray.
	 * This is safe because:
	 *
	 * 1. Concurrent stores and invalidations are excluded by folio lock.
	 *
	 * 2. Writeback is excluded by the entry not being on the LRU yet.
	 *    The publishing order matters to prevent writeback from seeing
	 *    an incoherent entry.
	 */
	if (entry->length) {
		INIT_LIST_HEAD(&entry->lru);
		zswap_lru_add(&zswap_list_lru, entry);
	}

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	count_vm_event(ZSWPOUT);

	return true;

store_failed:
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zpool_free(zswap_find_zpool(entry), entry->handle);
put_pool:
		zswap_pool_put(entry->pool);
	}
freepage:
	zswap_entry_cache_free(entry);
reject:
	obj_cgroup_put(objcg);
	if (zswap_pool_reached_full)
		queue_work(shrink_wq, &zswap_shrink_work);
check_old:
	/*
	 * If the zswap store fails or zswap is disabled, we must invalidate the
	 * possibly stale entry which was previously stored at this offset.
	 * Otherwise, writeback could overwrite the new data in the swapfile.
	 */
	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
	return false;
}

bool zswap_load(struct folio *folio)
{
	swp_entry_t swp = folio->swap;
	pgoff_t offset = swp_offset(swp);
	struct page *page = &folio->page;
	bool swapcache = folio_test_swapcache(folio);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;
	u8 *dst;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));

	/*
	 * When reading into the swapcache, invalidate our entry. The
	 * swapcache can be the authoritative owner of the page and
	 * its mappings, and the pressure that results from having two
	 * in-memory copies outweighs any benefits of caching the
	 * compression work.
	 *
	 * (Most swapins go through the swapcache. The notable
	 * exception is the singleton fault on SWP_SYNCHRONOUS_IO
	 * devices, which reads into a private page and may free it if
	 * the fault fails. We remain the primary owner of the entry.)
	 */
	if (swapcache)
		entry = xa_erase(tree, offset);
	else
		entry = xa_load(tree, offset);

	if (!entry)
		return false;

	if (entry->length)
		zswap_decompress(entry, page);
	else {
		dst = kmap_local_page(page);
		zswap_fill_page(dst, entry->value);
		kunmap_local(dst);
	}

	count_vm_event(ZSWPIN);
	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPIN);

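	/*
	 * The swapcache copy is about to become the only copy: the entry
	 * was erased from the tree above, so free it here and mark the
	 * folio dirty to ensure a later reclaim writes it back out again
	 * rather than trusting the now-stale swap slot.
	 */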
	if (swapcache) {
		zswap_entry_free(entry);
		folio_mark_dirty(folio);
	}

	return true;
}

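/*
 * Called when a swap slot is freed: drop any compressed copy zswap still
 * holds for that slot so it cannot go stale or leak pool memory.
 */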
void zswap_invalidate(swp_entry_t swp)
{
	pgoff_t offset = swp_offset(swp);
	struct xarray *tree = swap_zswap_tree(swp);
	struct zswap_entry *entry;

	entry = xa_erase(tree, offset);
	if (entry)
		zswap_entry_free(entry);
}

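/*
 * Each swap device gets an array of xarrays rather than a single tree,
 * carved into SWAP_ADDRESS_SPACE_PAGES-sized chunks like the swapcache
 * itself, so that concurrent stores and loads spread their contention
 * across multiple trees.
 */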
int zswap_swapon(int type, unsigned long nr_pages)
{
	struct xarray *trees, *tree;
	unsigned int nr, i;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
	if (!trees) {
		pr_err("alloc failed, zswap disabled for swap type %d\n", type);
		return -ENOMEM;
	}

	for (i = 0; i < nr; i++)
		xa_init(trees + i);

	nr_zswap_trees[type] = nr;
	zswap_trees[type] = trees;
	return 0;
}

void zswap_swapoff(int type)
{
	struct xarray *trees = zswap_trees[type];
	unsigned int i;

	if (!trees)
		return;

	/* try_to_unuse() invalidated all the entries already */
	for (i = 0; i < nr_zswap_trees[type]; i++)
		WARN_ON_ONCE(!xa_empty(trees + i));

	kvfree(trees);
	nr_zswap_trees[type] = 0;
	zswap_trees[type] = NULL;
}

/*********************************
* debugfs functions
**********************************/
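/*
 * The counters below are exported read-only through debugfs.  With
 * debugfs mounted at its usual location, something like:
 *
 *	# grep . /sys/kernel/debug/zswap/*
 *
 * dumps all of them at once.  pool_total_size is computed from
 * zswap_total_pages() at read time rather than stored.
 */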
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *zswap_debugfs_root;

static int debugfs_get_total_size(void *data, u64 *val)
{
	*val = zswap_total_pages() * PAGE_SIZE;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n");

static int zswap_debugfs_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zswap_debugfs_root = debugfs_create_dir("zswap", NULL);

	debugfs_create_u64("pool_limit_hit", 0444,
			   zswap_debugfs_root, &zswap_pool_limit_hit);
	debugfs_create_u64("reject_reclaim_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_reclaim_fail);
	debugfs_create_u64("reject_alloc_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_alloc_fail);
	debugfs_create_u64("reject_kmemcache_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_kmemcache_fail);
	debugfs_create_u64("reject_compress_fail", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_fail);
	debugfs_create_u64("reject_compress_poor", 0444,
			   zswap_debugfs_root, &zswap_reject_compress_poor);
	debugfs_create_u64("written_back_pages", 0444,
			   zswap_debugfs_root, &zswap_written_back_pages);
	debugfs_create_file("pool_total_size", 0444,
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
	debugfs_create_atomic_t("same_filled_pages", 0444,
				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
#else
static int zswap_debugfs_init(void)
{
	return 0;
}
#endif

/*********************************
* module init and exit
**********************************/
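/*
 * One-time setup: the entry kmem cache, the CPU hotplug state for the
 * per-CPU compression transforms, the shrink workqueue, the shrinker and
 * its memcg-aware LRU, and finally the initial pool.  Each failure path
 * unwinds exactly the steps that already succeeded, in reverse order.
 */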
static int zswap_setup(void)
{
	struct zswap_pool *pool;
	int ret;

	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
	if (!zswap_entry_cache) {
		pr_err("entry cache creation failed\n");
		goto cache_fail;
	}

	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				      "mm/zswap_pool:prepare",
				      zswap_cpu_comp_prepare,
				      zswap_cpu_comp_dead);
	if (ret)
		goto hp_fail;

	shrink_wq = alloc_workqueue("zswap-shrink",
			WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
	if (!shrink_wq)
		goto shrink_wq_fail;

	zswap_shrinker = zswap_alloc_shrinker();
	if (!zswap_shrinker)
		goto shrinker_fail;
	if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
		goto lru_fail;
	shrinker_register(zswap_shrinker);

	INIT_WORK(&zswap_shrink_work, shrink_worker);

	pool = __zswap_pool_create_fallback();
	if (pool) {
		pr_info("loaded using pool %s/%s\n", pool->tfm_name,
			zpool_get_type(pool->zpools[0]));
		list_add(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else {
		pr_err("pool creation failed\n");
		zswap_enabled = false;
	}

	if (zswap_debugfs_init())
		pr_warn("debugfs initialization failed\n");
	zswap_init_state = ZSWAP_INIT_SUCCEED;
	return 0;

lru_fail:
	shrinker_free(zswap_shrinker);
shrinker_fail:
	destroy_workqueue(shrink_wq);
shrink_wq_fail:
	cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
	kmem_cache_destroy(zswap_entry_cache);
cache_fail:
	/* if built-in, we aren't unloaded on failure; don't allow use */
	zswap_init_state = ZSWAP_INIT_FAILED;
	zswap_enabled = false;
	return -ENOMEM;
}

static int __init zswap_init(void)
{
	if (!zswap_enabled)
		return 0;
	return zswap_setup();
}
/* must be late so crypto has time to come up */
late_initcall(zswap_init);
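/*
 * Note that when zswap is built in but booted with zswap.enabled=0,
 * zswap_init() returns without doing the setup; flipping the enabled
 * parameter at runtime triggers zswap_setup() from
 * zswap_enabled_param_set() instead.
 */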

MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
MODULE_DESCRIPTION("Compressed cache for swap pages");