// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool.  This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012  Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate.  However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set =	zswap_enabled_param_set,
	.get =	param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set =	zswap_compressor_param_set,
	.get =	param_get_charp,
	.free =	param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set =	zswap_zpool_param_set,
	.get =	param_get_charp,
	.free =	param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/*
 * Enable/disable handling same-value filled pages (enabled by default).
 * If disabled every page is considered non-same-value filled.
 */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);

/* Enable/disable handling non-same-value filled pages (enabled by default) */
static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
		   bool, 0644);

static bool zswap_exclusive_loads_enabled = IS_ENABLED(
		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);

/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

bool is_zswap_enabled(void)
{
	return zswap_enabled;
}

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
};

/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback, in that case it
 * needs to be verified that it's still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct kref kref;
	struct list_head list;
	struct work_struct release_work;
	struct work_struct shrink_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
	struct list_lru list_lru;
	struct mem_cgroup *next_shrink;
	struct shrinker *shrinker;
	atomic_t nr_stored;
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into a red-black tree for the appropriate swap type
 * swpentry - associated swap entry, the offset indexes into the red-black tree
 * refcount - the number of outstanding references to the entry. This is needed
 *            to protect against premature freeing of the entry by concurrent
 *            calls to load, invalidate, and writeback.  The lock for the
 *            zswap_tree structure that contains the entry must be held while
 *            changing the refcount.  Since the lock must be held, there is
 *            no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data.  Needed during
 *          decompression.  For a same-value filled page length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of same-value filled pages which have the same content
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	struct rb_node rbnode;
	swp_entry_t swpentry;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpools[0]))

static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);
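
/*
 * Both limit checks below compare the pool's footprint in pages
 * (zswap_pool_total_size is in bytes) against a percentage of
 * totalram_pages(); see the max_pool_percent and accept_threshold_percent
 * module parameters above.
 */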
static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static bool zswap_can_accept(void)
{
	return totalram_pages() * zswap_accept_thr_percent / 100 *
				zswap_max_pool_percent / 100 >
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static u64 get_zswap_pool_size(struct zswap_pool *pool)
{
	u64 pool_size = 0;
	int i;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		pool_size += zpool_get_total_size(pool->zpools[i]);

	return pool_size;
}

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += get_zswap_pool_size(pool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	struct zswap_pool *pool;

	/* lock out zswap pools list modification */
	spin_lock(&zswap_pools_lock);
	list_for_each_entry(pool, &zswap_pools, list) {
		if (pool->next_shrink == memcg)
			pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
	}
	spin_unlock(&zswap_pools_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* zswap lruvec functions
**********************************/
void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
}
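
/*
 * A swapin of @folio suggests nearby entries are still warm; bump the
 * lruvec's nr_zswap_protected so the shrinker backs off this region.
 */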
void zswap_folio_swapin(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio) {
		lruvec = folio_lruvec(folio);
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	}
}

/*********************************
* lru functions
**********************************/
static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	atomic_long_t *nr_zswap_protected;
	unsigned long lru_size, old, new;
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del() and list_lru_putback().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);

	/* Update the protection area */
	lru_size = list_lru_count_one(list_lru, nid, memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
	old = atomic_long_inc_return(nr_zswap_protected);
	/*
	 * Decay to avoid overflow and adapt to changing workloads.
	 * This is based on LRU reclaim cost decaying heuristics.
	 */
	do {
		new = old > lru_size / 4 ? old / 2 : old;
	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
	rcu_read_unlock();
}

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_putback(struct list_lru *list_lru,
		struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	spinlock_t *lock = &list_lru->node[nid].lock;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	spin_lock(lock);
	/* we cannot use list_lru_add here, because it increments node's lru count */
	list_lru_putback(list_lru, &entry->lru, nid, memcg);
	spin_unlock(lock);

	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(entry_to_nid(entry)));
	/* increment the protection area to account for the LRU rotation. */
	atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	rcu_read_unlock();
}

/*********************************
* rbtree functions
**********************************/
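/*
 * Search the tree for an entry whose swap offset matches @offset;
 * returns NULL if none is found.  Caller must hold tree->lock.
 */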
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;
	pgoff_t entry_offset;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		entry_offset = swp_offset(entry->swpentry);
		if (entry_offset > offset)
			node = node->rb_left;
		else if (entry_offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;
	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		myentry_offset = swp_offset(myentry->swpentry);
		if (myentry_offset > entry_offset)
			link = &(*link)->rb_left;
		else if (myentry_offset < entry_offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
		return true;
	}
	return false;
}
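
/*
 * Spread entries across the pool's zpools (see ZSWAP_NR_ZPOOLS) by
 * hashing the entry pointer, for scalability.
 */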
static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
{
	int i = 0;

	if (ZSWAP_NR_ZPOOLS > 1)
		i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));

	return entry->pool->zpools[i];
}

/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zswap_lru_del(&entry->pool->list_lru, entry);
		zpool_free(zswap_find_zpool(entry), entry->handle);
		atomic_dec(&entry->pool->nr_stored);
		zswap_pool_put(entry->pool);
	}
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/*
 * Caller must hold the tree lock.
 * Remove from the tree and free it, if nobody references the entry.
 */
static void zswap_entry_put(struct zswap_tree *tree,
			struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	WARN_ON_ONCE(refcount < 0);
	if (refcount == 0) {
		WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode));
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

/*********************************
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg);

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
	unsigned long shrink_ret, nr_protected, lru_size;
	struct zswap_pool *pool = shrinker->private_data;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled ||
			!mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	lru_size = list_lru_shrink_count(&pool->list_lru, sc);

	/*
	 * Abort if we are shrinking into the protected region.
	 *
	 * This short-circuiting is necessary because if we have too many
	 * concurrent reclaimers getting the freeable zswap object counts at
	 * the same time (before any of them made reasonable progress), the
	 * total number of reclaimed objects might be more than the number of
	 * unprotected objects (i.e. the reclaimers will reclaim into the
	 * protected area of the zswap LRU).
	 */
	if (nr_protected >= lru_size - sc->nr_to_scan) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct zswap_pool *pool = shrinker->private_data;
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;

	if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
		return 0;

#ifdef CONFIG_MEMCG_KMEM
	mem_cgroup_flush_stats(memcg);
	nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
	nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
#else
	/* use pool stats instead of memcg stats */
	nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
	nr_stored = atomic_read(&pool->nr_stored);
#endif

	if (!nr_stored)
		return 0;

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
	/*
	 * Subtract from the lru size an estimate of the number of pages
	 * that should be protected.
	 */
	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}

static void zswap_alloc_shrinker(struct zswap_pool *pool)
{
	pool->shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!pool->shrinker)
		return;

	pool->shrinker->private_data = pool;
	pool->shrinker->scan_objects = zswap_shrinker_scan;
	pool->shrinker->count_objects = zswap_shrinker_count;
	pool->shrinker->batch = 0;
	pool->shrinker->seeks = DEFAULT_SEEKS;
}

/*********************************
* per-cpu code
**********************************/
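/*
 * Called via CPU hotplug for each (pool, cpu) pair: allocates this CPU's
 * acomp transform, request, and a PAGE_SIZE * 2 scratch buffer that is
 * used both for compression output and for copying out of zpools that
 * cannot sleep while mapped.
 */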
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
				pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * If the backend of acomp is an async zip driver, crypto_req_done()
	 * will wake up crypto_wait_req(); if the backend is scomp, the
	 * callback won't be called and crypto_wait_req() returns without
	 * blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}

/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}
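
/*
 * Take a reference on the last (oldest) pool on the list; this is the
 * pool that gets shrunk first when the store path hits the pool limit.
 */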
static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		/* all zpools share the same type */
		if (strcmp(zpool_get_type(pool->zpools[0]), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

/*
 * If the entry is still valid in the tree, drop the initial ref and remove it
 * from the tree. This function must be called with an additional ref held,
 * otherwise it may race with another invalidation freeing the entry.
 */
static void zswap_invalidate_entry(struct zswap_tree *tree,
				   struct zswap_entry *entry)
{
	if (zswap_rb_erase(&tree->rbroot, entry))
		zswap_entry_put(tree, entry);
}

static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	struct zswap_tree *tree;
	pgoff_t swpoffset;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpoffset is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpoffset = swp_offset(entry->swpentry);
	tree = zswap_trees[swp_type(entry->swpentry)];
	list_lru_isolate(l, item);
	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	/* Check for invalidate() race */
	spin_lock(&tree->lock);
	if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
		goto unlock;

	/* Hold a reference to prevent a free during writeback */
	zswap_entry_get(entry);
	spin_unlock(&tree->lock);

	writeback_result = zswap_writeback_entry(entry, tree);

	spin_lock(&tree->lock);
	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		zswap_lru_putback(&entry->pool->list_lru, entry);
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache)
			*encountered_page_in_swapcache = true;

		goto put_unlock;
	}
	zswap_written_back_pages++;

	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	count_vm_event(ZSWPWB);
	/*
	 * Writeback started successfully, the page now belongs to the
	 * swapcache. Drop the entry from zswap - unless invalidate already
	 * took it out while we had the tree->lock released for IO.
	 */
	zswap_invalidate_entry(tree, entry);

put_unlock:
	/* Drop local reference */
	zswap_entry_put(tree, entry);
unlock:
	spin_unlock(&tree->lock);
	spin_lock(lock);
	return ret;
}
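
/*
 * Try to write back one entry per NUMA node from @memcg's LRU in the
 * current pool; returns 0 if anything was reclaimed, -EAGAIN otherwise.
 */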
static int shrink_memcg(struct mem_cgroup *memcg)
{
	struct zswap_pool *pool;
	int nid, shrunk = 0;

	if (!mem_cgroup_zswap_writeback_enabled(memcg))
		return -EINVAL;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	pool = zswap_pool_current_get();
	if (!pool)
		return -EINVAL;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
	}
	zswap_pool_put(pool);
	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct zswap_pool *pool = container_of(w, typeof(*pool),
						shrink_work);
	struct mem_cgroup *memcg;
	int ret, failures = 0;

	/* global reclaim will select cgroup in a round-robin fashion. */
	do {
		spin_lock(&zswap_pools_lock);
		pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
		memcg = pool->next_shrink;

		/*
		 * We need to retry if we have gone through a full round trip, or if we
		 * got an offline memcg (or else we risk undoing the effect of the
		 * zswap memcg offlining cleanup callback). This is not catastrophic
		 * per se, but it will keep the now offlined memcg hostage for a while.
		 *
		 * Note that if we got an online memcg, we will keep the extra
		 * reference in case the original reference obtained by mem_cgroup_iter
		 * is dropped by the zswap memcg offlining callback, ensuring that the
		 * memcg is not killed when we are reclaiming.
		 */
		if (!memcg) {
			spin_unlock(&zswap_pools_lock);
			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}

		if (!mem_cgroup_tryget_online(memcg)) {
			/* drop the reference from mem_cgroup_iter() */
			mem_cgroup_iter_break(NULL, memcg);
			pool->next_shrink = NULL;
			spin_unlock(&zswap_pools_lock);

			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}
		spin_unlock(&zswap_pools_lock);

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		if (ret == -EINVAL)
			break;
		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;

resched:
		cond_resched();
	} while (!zswap_can_accept());
	zswap_pool_put(pool);
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	int i;
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
		/* unique name for each pool specifically required by zsmalloc */
		snprintf(name, 38, "zswap%x",
			 atomic_inc_return(&zswap_pools_count));

		pool->zpools[i] = zpool_create_pool(type, name, gfp);
		if (!pool->zpools[i]) {
			pr_err("%s zpool not available\n", type);
			goto error;
		}
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	zswap_alloc_shrinker(pool);
	if (!pool->shrinker)
		goto error;

	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);
	if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
		goto lru_fail;
	shrinker_register(pool->shrinker);
	INIT_WORK(&pool->shrink_work, shrink_worker);
	atomic_set(&pool->nr_stored, 0);

	zswap_pool_debug("created", pool);

	return pool;

lru_fail:
	list_lru_destroy(&pool->list_lru);
	shrinker_free(pool->shrinker);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	while (i--)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	int i;

	zswap_pool_debug("destroying", pool);

	shrinker_free(pool->shrinker);
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);
	list_lru_destroy(&pool->list_lru);

	spin_lock(&zswap_pools_lock);
	mem_cgroup_iter_break(NULL, pool->next_shrink);
	pool->next_shrink = NULL;
	spin_unlock(&zswap_pools_lock);

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

/*********************************
* param callbacks
**********************************/
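
/*
 * The zpool, compressor and enabled params are writable at runtime, e.g.:
 *   echo lz4 > /sys/module/zswap/parameters/compressor
 */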
static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}
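
/*
 * Decompress the entry's data into @page on this CPU's acomp context.
 * The context mutex serializes use of the shared request and buffer.
 */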
static void __zswap_load(struct zswap_entry *entry, struct page *page)
{
	struct zpool *zpool = zswap_find_zpool(entry);
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	if (!zpool_can_sleep_mapped(zpool)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (zpool_can_sleep_mapped(zpool))
		zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a folio to the swap cache,
 * decompressing the entry data into the folio, and issuing a
 * bio write to write the folio back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the folio
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted by zswap_store()
 * in the first place.  After the folio has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree)
{
	swp_entry_t swpentry = entry->swpentry;
	struct folio *folio;
	struct mempolicy *mpol;
	bool folio_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache folio */
	mpol = get_task_policy(current);
	folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
	if (!folio)
		return -ENOMEM;

	/*
	 * Found an existing folio, we raced with load/swapin. We generally
	 * writeback cold folios from zswap, and swapin means the folio just
	 * became hot. Skip this folio and let the caller find another one.
	 */
	if (!folio_was_allocated) {
		folio_put(folio);
		return -EEXIST;
	}

	/*
	 * folio is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot. Verify that the
	 * swap entry hasn't been invalidated and recycled behind our
	 * backs (our zswap_entry reference doesn't prevent that), to
	 * avoid overwriting a new swap folio with old compressed data.
	 */
	spin_lock(&tree->lock);
	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
		spin_unlock(&tree->lock);
		delete_from_swap_cache(folio);
		folio_unlock(folio);
		folio_put(folio);
		return -ENOMEM;
	}
	spin_unlock(&tree->lock);

	__zswap_load(entry, &folio->page);

	/* folio is up to date */
	folio_mark_uptodate(folio);

	/* move it to the tail of the inactive list after end_writeback */
	folio_set_reclaim(folio);

	/* start writeback */
	__swap_writepage(folio, &wbc);
	folio_put(folio);

	return 0;
}
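
/*
 * Returns 1 if the page is filled with a single repeating word, storing
 * that word in *value.  The last word is checked first, as a cheap early
 * reject for pages that are not same-filled.
 */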
static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return 0;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return 0;
	}

	*value = val;

	return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}
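
/*
 * Called from the swap-out path to store @folio's contents in zswap,
 * either as a same-filled value or as compressed data charged to the
 * folio's objcg.  Returns false to send the folio on to the regular
 * swap path instead.
 */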
34f4c198 1494bool zswap_store(struct folio *folio)
2b281117 1495{
3d2c9087 1496 swp_entry_t swp = folio->swap;
42c06a0e
JW
1497 int type = swp_type(swp);
1498 pgoff_t offset = swp_offset(swp);
34f4c198 1499 struct page *page = &folio->page;
2b281117
SJ
1500 struct zswap_tree *tree = zswap_trees[type];
1501 struct zswap_entry *entry, *dupentry;
1ec3b5fe
BS
1502 struct scatterlist input, output;
1503 struct crypto_acomp_ctx *acomp_ctx;
f4840ccf 1504 struct obj_cgroup *objcg = NULL;
a65b0e76 1505 struct mem_cgroup *memcg = NULL;
f4840ccf 1506 struct zswap_pool *pool;
b8cf32dc 1507 struct zpool *zpool;
0bb48849 1508 unsigned int dlen = PAGE_SIZE;
a85f878b 1509 unsigned long handle, value;
2b281117
SJ
1510 char *buf;
1511 u8 *src, *dst;
d2fcd82b 1512 gfp_t gfp;
42c06a0e
JW
1513 int ret;
1514
34f4c198
MWO
1515 VM_WARN_ON_ONCE(!folio_test_locked(folio));
1516 VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
2b281117 1517
34f4c198
MWO
1518 /* Large folios aren't supported */
1519 if (folio_test_large(folio))
42c06a0e 1520 return false;
7ba71669 1521
42c06a0e
JW
1522 if (!zswap_enabled || !tree)
1523 return false;
2b281117 1524
ca56489c
DC
1525 /*
1526 * If this is a duplicate, it must be removed before attempting to store
1527 * it, otherwise, if the store fails the old page won't be removed from
1528 * the tree, and it might be written back overriding the new data.
1529 */
1530 spin_lock(&tree->lock);
1531 dupentry = zswap_rb_search(&tree->rbroot, offset);
1532 if (dupentry) {
1533 zswap_duplicate_entry++;
1534 zswap_invalidate_entry(tree, dupentry);
1535 }
1536 spin_unlock(&tree->lock);
074e3e26 1537 objcg = get_obj_cgroup_from_folio(folio);
a65b0e76
DC
1538 if (objcg && !obj_cgroup_may_zswap(objcg)) {
1539 memcg = get_mem_cgroup_from_objcg(objcg);
1540 if (shrink_memcg(memcg)) {
1541 mem_cgroup_put(memcg);
1542 goto reject;
1543 }
1544 mem_cgroup_put(memcg);
1545 }
f4840ccf 1546
2b281117
SJ
1547 /* reclaim space if needed */
1548 if (zswap_is_full()) {
1549 zswap_pool_limit_hit++;
45190f01 1550 zswap_pool_reached_full = true;
f4840ccf 1551 goto shrink;
45190f01 1552 }
16e536ef 1553
45190f01 1554 if (zswap_pool_reached_full) {
42c06a0e 1555 if (!zswap_can_accept())
e0228d59 1556 goto shrink;
42c06a0e 1557 else
45190f01 1558 zswap_pool_reached_full = false;
2b281117
SJ
1559 }
1560
1561 /* allocate entry */
a65b0e76 1562 entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
2b281117
SJ
1563 if (!entry) {
1564 zswap_reject_kmemcache_fail++;
2b281117
SJ
1565 goto reject;
1566 }
1567
a85f878b 1568 if (zswap_same_filled_pages_enabled) {
003ae2fb 1569 src = kmap_local_page(page);
a85f878b 1570 if (zswap_is_page_same_filled(src, &value)) {
003ae2fb 1571 kunmap_local(src);
0bb48849 1572 entry->swpentry = swp_entry(type, offset);
a85f878b
SD
1573 entry->length = 0;
1574 entry->value = value;
1575 atomic_inc(&zswap_same_filled_pages);
1576 goto insert_entry;
1577 }
003ae2fb 1578 kunmap_local(src);
a85f878b
SD
1579 }
1580
42c06a0e 1581 if (!zswap_non_same_filled_pages_enabled)
cb325ddd 1582 goto freepage;
cb325ddd 1583
f1c54846
DS
1584 /* if entry is successfully added, it keeps the reference */
1585 entry->pool = zswap_pool_current_get();
42c06a0e 1586 if (!entry->pool)
f1c54846 1587 goto freepage;
f1c54846 1588
a65b0e76
DC
1589 if (objcg) {
1590 memcg = get_mem_cgroup_from_objcg(objcg);
1591 if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
1592 mem_cgroup_put(memcg);
1593 goto put_pool;
1594 }
1595 mem_cgroup_put(memcg);
1596 }
1597
2b281117 1598 /* compress */
1ec3b5fe
BS
1599 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1600
8ba2f844 1601 mutex_lock(&acomp_ctx->mutex);
1ec3b5fe 1602
8ba2f844 1603 dst = acomp_ctx->buffer;
1ec3b5fe 1604 sg_init_table(&input, 1);
96c7b0b4 1605 sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
1ec3b5fe 1606
8ba2f844
CZ
1607 /*
1608 * We need PAGE_SIZE * 2 here since there maybe over-compression case,
1609 * and hardware-accelerators may won't check the dst buffer size, so
1610 * giving the dst buffer with enough length to avoid buffer overflow.
1611 */
1ec3b5fe
BS
1612 sg_init_one(&output, dst, PAGE_SIZE * 2);
1613 acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1614 /*
1615 * it maybe looks a little bit silly that we send an asynchronous request,
1616 * then wait for its completion synchronously. This makes the process look
1617 * synchronous in fact.
1618 * Theoretically, acomp supports users send multiple acomp requests in one
1619 * acomp instance, then get those requests done simultaneously. but in this
42c06a0e 1620 * case, zswap actually does store and load page by page, there is no
1ec3b5fe 1621 * existing method to send the second page before the first page is done
42c06a0e 1622 * in one thread doing zwap.
1ec3b5fe
BS
1623 * but in different threads running on different cpu, we have different
1624 * acomp instance, so multiple threads can do (de)compression in parallel.
1625 */
1626 ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1627 dlen = acomp_ctx->req->dlen;
1628
cb61dad8
NP
1629 if (ret) {
1630 zswap_reject_compress_fail++;
f1c54846 1631 goto put_dstmem;
cb61dad8 1632 }
2b281117
SJ
1633
1634 /* store */
b8cf32dc 1635 zpool = zswap_find_zpool(entry);
d2fcd82b 1636 gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
b8cf32dc 1637 if (zpool_malloc_support_movable(zpool))
d2fcd82b 1638 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
b8cf32dc 1639 ret = zpool_malloc(zpool, dlen, gfp, &handle);
2b281117
SJ
1640 if (ret == -ENOSPC) {
1641 zswap_reject_compress_poor++;
f1c54846 1642 goto put_dstmem;
2b281117 SJ 1643 }
1644 if (ret) {
1645 zswap_reject_alloc_fail++;
f1c54846 1646 goto put_dstmem;
2b281117 1647 }
b8cf32dc 1648 buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
0bb48849 1649 memcpy(buf, dst, dlen);
b8cf32dc 1650 zpool_unmap_handle(zpool, handle);
8ba2f844 1651 mutex_unlock(&acomp_ctx->mutex);
2b281117 SJ 1652
1653 /* populate entry */
0bb48849 1654 entry->swpentry = swp_entry(type, offset);
2b281117 SJ 1655 entry->handle = handle;
1656 entry->length = dlen;
1657
a85f878b 1658insert_entry:
f4840ccf JW 1659 entry->objcg = objcg;
1660 if (objcg) {
1661 obj_cgroup_charge_zswap(objcg, entry->length);
1662 /* Account before objcg ref is moved to tree */
1663 count_objcg_event(objcg, ZSWPOUT);
1664 }
1665
2b281117 SJ 1666 /* map */
1667 spin_lock(&tree->lock);
ca56489c DC 1668 /*
1669 * A duplicate entry should have been removed at the beginning of this
1670 * function. Since the swap entry should be pinned, if a duplicate is
1671 * found again here it means that something went wrong in the swap
1672 * cache.
1673 */
42c06a0e 1674 while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
ca56489c 1675 WARN_ON(1);
42c06a0e 1676 zswap_duplicate_entry++;
56c67049 1677 zswap_invalidate_entry(tree, dupentry);
42c06a0e 1678 }
35499e2b 1679 if (entry->length) {
a65b0e76 DC 1680 INIT_LIST_HEAD(&entry->lru);
1681 zswap_lru_add(&entry->pool->list_lru, entry);
b5ba474f 1682 atomic_inc(&entry->pool->nr_stored);
f999f38b 1683 }
2b281117 SJ 1684 spin_unlock(&tree->lock);
1685
1686 /* update stats */
1687 atomic_inc(&zswap_stored_pages);
f1c54846 1688 zswap_update_total_size();
f6498b77 1689 count_vm_event(ZSWPOUT);
2b281117 1690
42c06a0e 1691 return true;
2b281117 1692
f1c54846 1693put_dstmem:
8ba2f844 1694 mutex_unlock(&acomp_ctx->mutex);
a65b0e76 1695put_pool:
f1c54846 DS 1696 zswap_pool_put(entry->pool);
1697freepage:
2b281117 SJ 1698 zswap_entry_cache_free(entry);
1699reject:
f4840ccf JW 1700 if (objcg)
1701 obj_cgroup_put(objcg);
42c06a0e 1702 return false;
f4840ccf JW 1703
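	/*
	 * The pool is over its accept threshold: kick the shrink worker to
	 * write LRU entries back to the swap device, and reject this store
	 * so it falls back to regular swap I/O.
	 */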
1704shrink:
1705 pool = zswap_pool_last_get();
969d63e1 JW 1706 if (pool && !queue_work(shrink_wq, &pool->shrink_work))
1707 zswap_pool_put(pool);
f4840ccf 1708 goto reject;
2b281117 SJ 1709}
1710
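/*
 * zswap_store() and zswap_load() hook into the swap I/O paths in
 * mm/page_io.c. A simplified sketch of the writeout-side caller, modeled
 * on swap_writepage() (not the verbatim caller):
 *
 *	if (zswap_store(folio)) {
 *		folio_start_writeback(folio);
 *		folio_unlock(folio);
 *		folio_end_writeback(folio);
 *		return 0;
 *	}
 *	... otherwise fall through and submit the folio to the swap device.
 */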
ca54f6d8 1711bool zswap_load(struct folio *folio)
2b281117 1712{
3d2c9087 1713 swp_entry_t swp = folio->swap;
42c06a0e JW 1714 int type = swp_type(swp);
1715 pgoff_t offset = swp_offset(swp);
ca54f6d8 1716 struct page *page = &folio->page;
2b281117 SJ 1717 struct zswap_tree *tree = zswap_trees[type];
1718 struct zswap_entry *entry;
32acba4c 1719 u8 *dst;
42c06a0e 1720
ca54f6d8 1721 VM_WARN_ON_ONCE(!folio_test_locked(folio));
2b281117 SJ 1722
1723 /* find */
1724 spin_lock(&tree->lock);
0ab0abcf 1725 entry = zswap_entry_find_get(&tree->rbroot, offset);
2b281117 1726 if (!entry) {
2b281117 1727 spin_unlock(&tree->lock);
42c06a0e 1728 return false;
2b281117 1729 }
2b281117 SJ 1730 spin_unlock(&tree->lock);
1731
66447fd0 CZ 1732 if (entry->length)
1733 __zswap_load(entry, page);
1734 else {
003ae2fb 1735 dst = kmap_local_page(page);
a85f878b 1736 zswap_fill_page(dst, entry->value);
003ae2fb 1737 kunmap_local(dst);
a85f878b SD 1738 }
1739
f6498b77 1740 count_vm_event(ZSWPIN);
f4840ccf JW 1741 if (entry->objcg)
1742 count_objcg_event(entry->objcg, ZSWPIN);
c75f5c1e 1743
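	/*
	 * With exclusive loads the compressed copy is dropped after a hit,
	 * so mark the folio dirty: if it is chosen for swap-out again it
	 * must be written back rather than assumed to still be in zswap.
	 */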
2b281117 1744 spin_lock(&tree->lock);
66447fd0 1745 if (zswap_exclusive_loads_enabled) {
b9c91c43 1746 zswap_invalidate_entry(tree, entry);
ca54f6d8 1747 folio_mark_dirty(folio);
35499e2b 1748 } else if (entry->length) {
a65b0e76 DC 1749 zswap_lru_del(&entry->pool->list_lru, entry);
1750 zswap_lru_add(&entry->pool->list_lru, entry);
b9c91c43 1751 }
18a93707 1752 zswap_entry_put(tree, entry);
2b281117 SJ 1753 spin_unlock(&tree->lock);
1754
66447fd0 1755 return true;
2b281117 SJ 1756}
1757
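/*
 * Called when a swap slot is freed: drop any compressed copy cached for
 * it so a stale entry can never be loaded for a reused slot.
 */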
42c06a0e 1758void zswap_invalidate(int type, pgoff_t offset)
2b281117 SJ 1759{
1760 struct zswap_tree *tree = zswap_trees[type];
1761 struct zswap_entry *entry;
2b281117 SJ 1762
1763 /* find */
1764 spin_lock(&tree->lock);
1765 entry = zswap_rb_search(&tree->rbroot, offset);
1766 if (!entry) {
1767 /* entry was written back */
1768 spin_unlock(&tree->lock);
1769 return;
1770 }
b9c91c43 1771 zswap_invalidate_entry(tree, entry);
2b281117 1772 spin_unlock(&tree->lock);
2b281117 SJ 1773}
1774
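/*
 * Called at swapon time to allocate the per-swap-type red-black tree that
 * indexes compressed entries by swap offset. On allocation failure, only
 * zswap is disabled for this swap type; swapon itself still proceeds.
 */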
42c06a0e JW 1775void zswap_swapon(int type)
1776{
1777 struct zswap_tree *tree;
1778
1779 tree = kzalloc(sizeof(*tree), GFP_KERNEL);
1780 if (!tree) {
1781 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1782 return;
1783 }
1784
1785 tree->rbroot = RB_ROOT;
1786 spin_lock_init(&tree->lock);
1787 zswap_trees[type] = tree;
1788}
1789
1790void zswap_swapoff(int type)
2b281117 SJ 1791{
1792 struct zswap_tree *tree = zswap_trees[type];
0bd42136 1793 struct zswap_entry *entry, *n;
2b281117 SJ 1794
1795 if (!tree)
1796 return;
1797
1798 /* walk the tree and free everything */
1799 spin_lock(&tree->lock);
0ab0abcf 1800 rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
60105e12 1801 zswap_free_entry(entry);
2b281117 SJ 1802 tree->rbroot = RB_ROOT;
1803 spin_unlock(&tree->lock);
aa9bca05 WY 1804 kfree(tree);
1805 zswap_trees[type] = NULL;
2b281117 SJ 1806}
1807
2b281117 SJ 1808/*********************************
1809* debugfs functions
1810**********************************/
1811#ifdef CONFIG_DEBUG_FS
1812#include <linux/debugfs.h>
1813
1814static struct dentry *zswap_debugfs_root;
1815
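/*
 * The counters below appear under /sys/kernel/debug/zswap/. A quick way
 * to dump them all from the shell (assuming debugfs is mounted in the
 * usual place):
 *
 *	# grep -H . /sys/kernel/debug/zswap/*
 */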
141fdeec 1816static int zswap_debugfs_init(void)
2b281117 SJ 1817{
1818 if (!debugfs_initialized())
1819 return -ENODEV;
1820
1821 zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
2b281117 1822
0825a6f9 JP 1823 debugfs_create_u64("pool_limit_hit", 0444,
1824 zswap_debugfs_root, &zswap_pool_limit_hit);
1825 debugfs_create_u64("reject_reclaim_fail", 0444,
1826 zswap_debugfs_root, &zswap_reject_reclaim_fail);
1827 debugfs_create_u64("reject_alloc_fail", 0444,
1828 zswap_debugfs_root, &zswap_reject_alloc_fail);
1829 debugfs_create_u64("reject_kmemcache_fail", 0444,
1830 zswap_debugfs_root, &zswap_reject_kmemcache_fail);
cb61dad8 NP 1831 debugfs_create_u64("reject_compress_fail", 0444,
1832 zswap_debugfs_root, &zswap_reject_compress_fail);
0825a6f9 JP 1833 debugfs_create_u64("reject_compress_poor", 0444,
1834 zswap_debugfs_root, &zswap_reject_compress_poor);
1835 debugfs_create_u64("written_back_pages", 0444,
1836 zswap_debugfs_root, &zswap_written_back_pages);
1837 debugfs_create_u64("duplicate_entry", 0444,
1838 zswap_debugfs_root, &zswap_duplicate_entry);
1839 debugfs_create_u64("pool_total_size", 0444,
1840 zswap_debugfs_root, &zswap_pool_total_size);
1841 debugfs_create_atomic_t("stored_pages", 0444,
1842 zswap_debugfs_root, &zswap_stored_pages);
a85f878b 1843 debugfs_create_atomic_t("same_filled_pages", 0444,
0825a6f9 1844 zswap_debugfs_root, &zswap_same_filled_pages);
2b281117 SJ 1845
1846 return 0;
1847}
2b281117 1848#else
141fdeec 1849static int zswap_debugfs_init(void)
2b281117 SJ 1850{
1851 return 0;
1852}
2b281117 SJ 1853#endif
1854
1855/*********************************
1856* module init and exit
1857**********************************/
141fdeec 1858static int zswap_setup(void)
2b281117 1859{
f1c54846 1860 struct zswap_pool *pool;
ad7ed770 1861 int ret;
60105e12 1862
b7919122 LS 1863 zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1864 if (!zswap_entry_cache) {
2b281117 1865 pr_err("entry cache creation failed\n");
f1c54846 1866 goto cache_fail;
2b281117 1867 }
f1c54846 1868
cab7a7e5 SAS 1869 ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1870 "mm/zswap_pool:prepare",
1871 zswap_cpu_comp_prepare,
1872 zswap_cpu_comp_dead);
1873 if (ret)
1874 goto hp_fail;
1875
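	/*
	 * The hotplug callbacks registered above allocate and free the
	 * per-CPU acomp contexts (acomp_ctx) for each pool as CPUs are
	 * brought up and torn down.
	 */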
f1c54846 1876 pool = __zswap_pool_create_fallback();
ae3d89a7 DS 1877 if (pool) {
1878 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
b8cf32dc 1879 zpool_get_type(pool->zpools[0]));
ae3d89a7 DS 1880 list_add(&pool->list, &zswap_pools);
1881 zswap_has_pool = true;
1882 } else {
f1c54846 1883 pr_err("pool creation failed\n");
ae3d89a7 1884 zswap_enabled = false;
2b281117 1885 }
60105e12 1886
45190f01 VW 1887 shrink_wq = create_workqueue("zswap-shrink");
1888 if (!shrink_wq)
1889 goto fallback_fail;
1890
2b281117 SJ 1891 if (zswap_debugfs_init())
1892 pr_warn("debugfs initialization failed\n");
9021ccec 1893 zswap_init_state = ZSWAP_INIT_SUCCEED;
2b281117 1894 return 0;
f1c54846 1895
45190f01 1896fallback_fail:
38aeb071 DC 1897 if (pool)
1898 zswap_pool_destroy(pool);
cab7a7e5 1899hp_fail:
b7919122 1900 kmem_cache_destroy(zswap_entry_cache);
f1c54846 1901cache_fail:
d7b028f5 1902 /* if built-in, we aren't unloaded on failure; don't allow use */
9021ccec 1903 zswap_init_state = ZSWAP_INIT_FAILED;
d7b028f5 1904 zswap_enabled = false;
2b281117 SJ 1905 return -ENOMEM;
1906}
141fdeec LS 1907
1908static int __init zswap_init(void)
1909{
1910 if (!zswap_enabled)
1911 return 0;
1912 return zswap_setup();
1913}
2b281117 1914/* must be late so crypto has time to come up */
141fdeec 1915late_initcall(zswap_init);
2b281117 1916
68386da8 1917MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
2b281117 1918MODULE_DESCRIPTION("Compressed cache for swap pages");