mm/zswap: change per-cpu mutex and buffer to per-acomp_ctx
mm/zswap.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * zswap.c - zswap driver file
 *
 * zswap is a cache that takes pages that are in the process
 * of being swapped out and attempts to compress and store them in a
 * RAM-based memory pool. This can result in a significant I/O reduction on
 * the swap device and, in the case where decompressing from RAM is faster
 * than reading from the swap device, can also improve workload performance.
 *
 * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/swap.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/mempolicy.h>
#include <linux/mempool.h>
#include <linux/zpool.h>
#include <crypto/acompress.h>
#include <linux/zswap.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/workqueue.h>
#include <linux/list_lru.h>

#include "swap.h"
#include "internal.h"

/*********************************
* statistics
**********************************/
/* Total bytes used by the compressed storage */
u64 zswap_pool_total_size;
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
/* The number of same-value filled pages currently stored in zswap */
static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
 * The statistics below are not protected from concurrent access for
 * performance reasons so they may not be 100% accurate. However,
 * they do provide useful information on roughly how many times a
 * certain event is occurring.
 */

/* Pool limit was hit (see zswap_max_pool_percent) */
static u64 zswap_pool_limit_hit;
/* Pages written back when pool limit was reached */
static u64 zswap_written_back_pages;
/* Store failed due to a reclaim failure after pool limit was reached */
static u64 zswap_reject_reclaim_fail;
/* Store failed due to compression algorithm failure */
static u64 zswap_reject_compress_fail;
/* Compressed page was too big for the allocator to (optimally) store */
static u64 zswap_reject_compress_poor;
/* Store failed because underlying allocator could not get memory */
static u64 zswap_reject_alloc_fail;
/* Store failed because the entry metadata could not be allocated (rare) */
static u64 zswap_reject_kmemcache_fail;
/* Duplicate store was encountered (rare) */
static u64 zswap_duplicate_entry;

/* Shrinker work queue */
static struct workqueue_struct *shrink_wq;
/* Pool limit was hit, we need to calm down */
static bool zswap_pool_reached_full;

/*********************************
* tunables
**********************************/

#define ZSWAP_PARAM_UNSET ""

static int zswap_setup(void);

/* Enable/disable zswap */
static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
static int zswap_enabled_param_set(const char *,
				   const struct kernel_param *);
static const struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);

/* Crypto compressor to use */
static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
static int zswap_compressor_param_set(const char *,
				      const struct kernel_param *);
static const struct kernel_param_ops zswap_compressor_param_ops = {
	.set = zswap_compressor_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(compressor, &zswap_compressor_param_ops,
		&zswap_compressor, 0644);

/* Compressed storage zpool to use */
static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
static int zswap_zpool_param_set(const char *, const struct kernel_param *);
static const struct kernel_param_ops zswap_zpool_param_ops = {
	.set = zswap_zpool_param_set,
	.get = param_get_charp,
	.free = param_free_charp,
};
module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644);

/* The maximum percentage of memory that the compressed pool can occupy */
static unsigned int zswap_max_pool_percent = 20;
module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644);

/* The threshold for accepting new pages after the max_pool_percent was hit */
static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
module_param_named(accept_threshold_percent, zswap_accept_thr_percent,
		   uint, 0644);

/*
 * Enable/disable handling same-value filled pages (enabled by default).
 * If disabled every page is considered non-same-value filled.
 */
static bool zswap_same_filled_pages_enabled = true;
module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled,
		   bool, 0644);

/* Enable/disable handling non-same-value filled pages (enabled by default) */
static bool zswap_non_same_filled_pages_enabled = true;
module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
		   bool, 0644);

static bool zswap_exclusive_loads_enabled = IS_ENABLED(
		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);

/* Number of zpools in zswap_pool (empirically determined for scalability) */
#define ZSWAP_NR_ZPOOLS 32

/* Enable/disable memory pressure-based shrinker. */
static bool zswap_shrinker_enabled = IS_ENABLED(
		CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);

/*********************************
* data structures
**********************************/

struct crypto_acomp_ctx {
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct crypto_wait wait;
	u8 *buffer;
	struct mutex mutex;
};

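/*
 * Illustrative sketch (not part of zswap): with the mutex and buffer now
 * embedded in each per-CPU crypto_acomp_ctx, a caller pins the current
 * CPU's context and serializes on that context's own lock instead of a
 * separate global per-cpu mutex/dstmem pair. The helper name below is
 * hypothetical; the real lock/use/unlock pattern appears in zswap_store()
 * and __zswap_load() later in this file.
 */
static inline u8 *zswap_example_grab_buffer(struct crypto_acomp_ctx __percpu *acomp_ctx)
{
	struct crypto_acomp_ctx *ctx = raw_cpu_ptr(acomp_ctx);

	mutex_lock(&ctx->mutex);	/* protects ctx->req and ctx->buffer */
	return ctx->buffer;		/* caller does mutex_unlock(&ctx->mutex) when done */
}
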
/*
 * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
 * The only case where lru_lock is not acquired while holding tree.lock is
 * when a zswap_entry is taken off the lru for writeback; in that case it
 * must be verified that the entry is still valid in the tree.
 */
struct zswap_pool {
	struct zpool *zpools[ZSWAP_NR_ZPOOLS];
	struct crypto_acomp_ctx __percpu *acomp_ctx;
	struct kref kref;
	struct list_head list;
	struct work_struct release_work;
	struct work_struct shrink_work;
	struct hlist_node node;
	char tfm_name[CRYPTO_MAX_ALG_NAME];
	struct list_lru list_lru;
	struct mem_cgroup *next_shrink;
	struct shrinker *shrinker;
	atomic_t nr_stored;
};

/*
 * struct zswap_entry
 *
 * This structure contains the metadata for tracking a single compressed
 * page within zswap.
 *
 * rbnode - links the entry into red-black tree for the appropriate swap type
 * swpentry - associated swap entry, the offset indexes into the red-black tree
 * refcount - the number of outstanding references to the entry. This is needed
 *            to protect against premature freeing of the entry by concurrent
 *            calls to load, invalidate, and writeback. The lock for the
 *            zswap_tree structure that contains the entry must be held while
 *            changing the refcount. Since the lock must be held, there is
 *            no reason to also make refcount atomic.
 * length - the length in bytes of the compressed page data. Needed during
 *          decompression. For a same-value filled page length is 0, and both
 *          pool and lru are invalid and must be ignored.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
 * value - value of the same-value filled pages which have same content
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
struct zswap_entry {
	struct rb_node rbnode;
	swp_entry_t swpentry;
	int refcount;
	unsigned int length;
	struct zswap_pool *pool;
	union {
		unsigned long handle;
		unsigned long value;
	};
	struct obj_cgroup *objcg;
	struct list_head lru;
};

/*
 * The tree lock in the zswap_tree struct protects a few things:
 * - the rbtree
 * - the refcount field of each entry in the tree
 */
struct zswap_tree {
	struct rb_root rbroot;
	spinlock_t lock;
};

static struct zswap_tree *zswap_trees[MAX_SWAPFILES];

/* RCU-protected iteration */
static LIST_HEAD(zswap_pools);
/* protects zswap_pools list modification */
static DEFINE_SPINLOCK(zswap_pools_lock);
/* pool counter to provide unique names to zpool */
static atomic_t zswap_pools_count = ATOMIC_INIT(0);

enum zswap_init_type {
	ZSWAP_UNINIT,
	ZSWAP_INIT_SUCCEED,
	ZSWAP_INIT_FAILED
};

static enum zswap_init_type zswap_init_state;

/* used to ensure the integrity of initialization */
static DEFINE_MUTEX(zswap_init_lock);

/* init completed, but couldn't create the initial pool */
static bool zswap_has_pool;

/*********************************
* helpers and fwd declarations
**********************************/

#define zswap_pool_debug(msg, p)				\
	pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,		\
		 zpool_get_type((p)->zpools[0]))

static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

static bool zswap_is_full(void)
{
	return totalram_pages() * zswap_max_pool_percent / 100 <
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

static bool zswap_can_accept(void)
{
	return totalram_pages() * zswap_accept_thr_percent / 100 *
				zswap_max_pool_percent / 100 >
			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

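/*
 * Illustrative sketch (not part of zswap): with the default tunables
 * (zswap_max_pool_percent = 20, zswap_accept_thr_percent = 90), a machine
 * with 4 GiB of RAM caps the pool at 0.8 GiB; once the limit is hit, new
 * stores are refused until the pool shrinks below 90% of the cap, i.e.
 * 0.72 GiB. The hypothetical helper below restates that arithmetic using
 * the default percentages.
 */
static inline unsigned long zswap_example_accept_limit(unsigned long total_pages)
{
	/* pages below which zswap_can_accept() becomes true again */
	return total_pages * 90 / 100 * 20 / 100;
}
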
static u64 get_zswap_pool_size(struct zswap_pool *pool)
{
	u64 pool_size = 0;
	int i;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		pool_size += zpool_get_total_size(pool->zpools[i]);

	return pool_size;
}

static void zswap_update_total_size(void)
{
	struct zswap_pool *pool;
	u64 total = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		total += get_zswap_pool_size(pool);

	rcu_read_unlock();

	zswap_pool_total_size = total;
}

/* should be called under RCU */
#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
}
#else
static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
{
	return NULL;
}
#endif

static inline int entry_to_nid(struct zswap_entry *entry)
{
	return page_to_nid(virt_to_page(entry));
}

void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
	struct zswap_pool *pool;

	/* lock out zswap pools list modification */
	spin_lock(&zswap_pools_lock);
	list_for_each_entry(pool, &zswap_pools, list) {
		if (pool->next_shrink == memcg)
			pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
	}
	spin_unlock(&zswap_pools_lock);
}

/*********************************
* zswap entry functions
**********************************/
static struct kmem_cache *zswap_entry_cache;

static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}

static void zswap_entry_cache_free(struct zswap_entry *entry)
{
	kmem_cache_free(zswap_entry_cache, entry);
}

/*********************************
* zswap lruvec functions
**********************************/
void zswap_lruvec_state_init(struct lruvec *lruvec)
{
	atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
}

void zswap_page_swapin(struct page *page)
{
	struct lruvec *lruvec;

	if (page) {
		lruvec = folio_lruvec(page_folio(page));
		atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	}
}

/*********************************
* lru functions
**********************************/
static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
	atomic_long_t *nr_zswap_protected;
	unsigned long lru_size, old, new;
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	/*
	 * Note that it is safe to use rcu_read_lock() here, even in the face of
	 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
	 * used in list_lru lookup, only two scenarios are possible:
	 *
	 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
	 *    new entry will be reparented to memcg's parent's list_lru.
	 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
	 *    new entry will be added directly to memcg's parent's list_lru.
	 *
	 * Similar reasoning holds for list_lru_del() and list_lru_putback().
	 */
	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_add(list_lru, &entry->lru, nid, memcg);

	/* Update the protection area */
	lru_size = list_lru_count_one(list_lru, nid, memcg);
	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
	old = atomic_long_inc_return(nr_zswap_protected);
	/*
	 * Decay to avoid overflow and adapt to changing workloads.
	 * This is based on LRU reclaim cost decaying heuristics.
	 */
	do {
		new = old > lru_size / 4 ? old / 2 : old;
	} while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
	rcu_read_unlock();
}
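
/*
 * Illustrative sketch (not part of zswap): a concrete run of the decay
 * above. With lru_size = 1000, nr_zswap_protected grows freely up to
 * lru_size / 4 = 250; the first increment past that halves it, so a
 * counter at 260 drops back to 130 rather than creeping toward overflow.
 */
static inline long zswap_example_decay(long old, unsigned long lru_size)
{
	return old > lru_size / 4 ? old / 2 : old;	/* same rule as the cmpxchg loop above */
}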

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	/* will always succeed */
	list_lru_del(list_lru, &entry->lru, nid, memcg);
	rcu_read_unlock();
}

static void zswap_lru_putback(struct list_lru *list_lru,
		struct zswap_entry *entry)
{
	int nid = entry_to_nid(entry);
	spinlock_t *lock = &list_lru->node[nid].lock;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_entry(entry);
	spin_lock(lock);
	/* we cannot use list_lru_add here, because it increments node's lru count */
	list_lru_putback(list_lru, &entry->lru, nid, memcg);
	spin_unlock(lock);

	lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(entry_to_nid(entry)));
	/* increment the protection area to account for the LRU rotation. */
	atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	rcu_read_unlock();
}

/*********************************
* rbtree functions
**********************************/
static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
{
	struct rb_node *node = root->rb_node;
	struct zswap_entry *entry;
	pgoff_t entry_offset;

	while (node) {
		entry = rb_entry(node, struct zswap_entry, rbnode);
		entry_offset = swp_offset(entry->swpentry);
		if (entry_offset > offset)
			node = node->rb_left;
		else if (entry_offset < offset)
			node = node->rb_right;
		else
			return entry;
	}
	return NULL;
}

/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;
	pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		myentry_offset = swp_offset(myentry->swpentry);
		if (myentry_offset > entry_offset)
			link = &(*link)->rb_left;
		else if (myentry_offset < entry_offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}

static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
		return true;
	}
	return false;
}

static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
{
	int i = 0;

	if (ZSWAP_NR_ZPOOLS > 1)
		i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));

	return entry->pool->zpools[i];
}

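/*
 * Illustrative sketch (not part of zswap): zswap_find_zpool() spreads
 * entries over the ZSWAP_NR_ZPOOLS zpools by hashing the entry pointer
 * down to ilog2(32) = 5 bits, so a given entry always maps to the same
 * zpool while different entries scatter across all of them, reducing
 * contention on any single zpool's locks.
 */
static inline int zswap_example_zpool_index(struct zswap_entry *entry)
{
	return hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));	/* 0..31 */
}
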
/*
 * Carries out the common pattern of freeing an entry's zpool allocation,
 * freeing the entry itself, and decrementing the number of stored pages.
 */
static void zswap_free_entry(struct zswap_entry *entry)
{
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
	}
	if (!entry->length)
		atomic_dec(&zswap_same_filled_pages);
	else {
		zswap_lru_del(&entry->pool->list_lru, entry);
		zpool_free(zswap_find_zpool(entry), entry->handle);
		atomic_dec(&entry->pool->nr_stored);
		zswap_pool_put(entry->pool);
	}
	zswap_entry_cache_free(entry);
	atomic_dec(&zswap_stored_pages);
	zswap_update_total_size();
}

/* caller must hold the tree lock */
static void zswap_entry_get(struct zswap_entry *entry)
{
	entry->refcount++;
}

/*
 * caller must hold the tree lock
 * remove from the tree and free it, if nobody references the entry
 */
static void zswap_entry_put(struct zswap_tree *tree,
			    struct zswap_entry *entry)
{
	int refcount = --entry->refcount;

	WARN_ON_ONCE(refcount < 0);
	if (refcount == 0) {
		WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode));
		zswap_free_entry(entry);
	}
}

/* caller must hold the tree lock */
static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
				pgoff_t offset)
{
	struct zswap_entry *entry;

	entry = zswap_rb_search(root, offset);
	if (entry)
		zswap_entry_get(entry);

	return entry;
}

/*********************************
* shrinker functions
**********************************/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg);

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
	unsigned long shrink_ret, nr_protected, lru_size;
	struct zswap_pool *pool = shrinker->private_data;
	bool encountered_page_in_swapcache = false;

	if (!zswap_shrinker_enabled) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	lru_size = list_lru_shrink_count(&pool->list_lru, sc);

	/*
	 * Abort if we are shrinking into the protected region.
	 *
	 * This short-circuiting is necessary because if we have too many
	 * concurrent reclaimers getting the freeable zswap object counts at the
	 * same time (before any of them made reasonable progress), the total
	 * number of reclaimed objects might be more than the number of unprotected
	 * objects (i.e the reclaimers will reclaim into the protected area of the
	 * zswap LRU).
	 */
	if (nr_protected >= lru_size - sc->nr_to_scan) {
		sc->nr_scanned = 0;
		return SHRINK_STOP;
	}

	shrink_ret = list_lru_shrink_walk(&pool->list_lru, sc, &shrink_memcg_cb,
		&encountered_page_in_swapcache);

	if (encountered_page_in_swapcache)
		return SHRINK_STOP;

	return shrink_ret ? shrink_ret : SHRINK_STOP;
}

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	struct zswap_pool *pool = shrinker->private_data;
	struct mem_cgroup *memcg = sc->memcg;
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
	unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;

	if (!zswap_shrinker_enabled)
		return 0;

#ifdef CONFIG_MEMCG_KMEM
	mem_cgroup_flush_stats(memcg);
	nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
	nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
#else
	/* use pool stats instead of memcg stats */
	nr_backing = get_zswap_pool_size(pool) >> PAGE_SHIFT;
	nr_stored = atomic_read(&pool->nr_stored);
#endif

	if (!nr_stored)
		return 0;

	nr_protected =
		atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
	nr_freeable = list_lru_shrink_count(&pool->list_lru, sc);
	/*
	 * Subtract from the lru size an estimate of the number of pages
	 * that should be protected.
	 */
	nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;

	/*
	 * Scale the number of freeable pages by the memory saving factor.
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}

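/*
 * Illustrative sketch (not part of zswap): the mult_frac() above scales
 * the freeable count by the compression ratio. If 100 stored pages
 * compressed down to 30 pages of backing memory, mult_frac(100, 30, 100)
 * reports 30 freeable objects, telling reclaim that evicting everything
 * only returns 30 pages, so well-compressed memory is shrunk less
 * aggressively.
 */
static inline unsigned long zswap_example_scaled_freeable(unsigned long nr_freeable,
		unsigned long nr_backing, unsigned long nr_stored)
{
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}
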
static void zswap_alloc_shrinker(struct zswap_pool *pool)
{
	pool->shrinker =
		shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
	if (!pool->shrinker)
		return;

	pool->shrinker->private_data = pool;
	pool->shrinker->scan_objects = zswap_shrinker_scan;
	pool->shrinker->count_objects = zswap_shrinker_count;
	pool->shrinker->batch = 0;
	pool->shrinker->seeks = DEFAULT_SEEKS;
}

/*********************************
* per-cpu code
**********************************/
static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	int ret;

	mutex_init(&acomp_ctx->mutex);

	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
	if (!acomp_ctx->buffer)
		return -ENOMEM;

	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
	if (IS_ERR(acomp)) {
		pr_err("could not alloc crypto acomp %s : %ld\n",
				pool->tfm_name, PTR_ERR(acomp));
		ret = PTR_ERR(acomp);
		goto acomp_fail;
	}
	acomp_ctx->acomp = acomp;

	req = acomp_request_alloc(acomp_ctx->acomp);
	if (!req) {
		pr_err("could not alloc crypto acomp_request %s\n",
		       pool->tfm_name);
		ret = -ENOMEM;
		goto req_fail;
	}
	acomp_ctx->req = req;

	crypto_init_wait(&acomp_ctx->wait);
	/*
	 * if the backend of acomp is async zip, crypto_req_done() will wake up
	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
	 * won't be called, crypto_wait_req() will return without blocking.
	 */
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &acomp_ctx->wait);

	return 0;

req_fail:
	crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
	kfree(acomp_ctx->buffer);
	return ret;
}

static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

	if (!IS_ERR_OR_NULL(acomp_ctx)) {
		if (!IS_ERR_OR_NULL(acomp_ctx->req))
			acomp_request_free(acomp_ctx->req);
		if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
			crypto_free_acomp(acomp_ctx->acomp);
		kfree(acomp_ctx->buffer);
	}

	return 0;
}

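/*
 * Illustrative note (not part of this hunk): the two callbacks above are
 * registered as a cpuhp multi-state pair during zswap_setup(), roughly:
 *
 *	cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
 *				"mm/zswap_pool:prepare",
 *				zswap_cpu_comp_prepare,
 *				zswap_cpu_comp_dead);
 *
 * and each pool then adds itself as an instance (see
 * cpuhp_state_add_instance() in zswap_pool_create() below), so every
 * possible CPU gets its own acomp transform, request, mutex and two-page
 * buffer. The exact state name string is an assumption here.
 */
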
/*********************************
* pool functions
**********************************/

static struct zswap_pool *__zswap_pool_current(void)
{
	struct zswap_pool *pool;

	pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
	WARN_ONCE(!pool && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);

	return pool;
}

static struct zswap_pool *zswap_pool_current(void)
{
	assert_spin_locked(&zswap_pools_lock);

	return __zswap_pool_current();
}

static struct zswap_pool *zswap_pool_current_get(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();

	pool = __zswap_pool_current();
	if (!zswap_pool_get(pool))
		pool = NULL;

	rcu_read_unlock();

	return pool;
}

static struct zswap_pool *zswap_pool_last_get(void)
{
	struct zswap_pool *pool, *last = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(pool, &zswap_pools, list)
		last = pool;
	WARN_ONCE(!last && zswap_has_pool,
		  "%s: no page storage pool!\n", __func__);
	if (!zswap_pool_get(last))
		last = NULL;

	rcu_read_unlock();

	return last;
}

/* type and compressor must be null-terminated */
static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
{
	struct zswap_pool *pool;

	assert_spin_locked(&zswap_pools_lock);

	list_for_each_entry_rcu(pool, &zswap_pools, list) {
		if (strcmp(pool->tfm_name, compressor))
			continue;
		/* all zpools share the same type */
		if (strcmp(zpool_get_type(pool->zpools[0]), type))
			continue;
		/* if we can't get it, it's about to be destroyed */
		if (!zswap_pool_get(pool))
			continue;
		return pool;
	}

	return NULL;
}

/*
 * If the entry is still valid in the tree, drop the initial ref and remove it
 * from the tree. This function must be called with an additional ref held,
 * otherwise it may race with another invalidation freeing the entry.
 */
static void zswap_invalidate_entry(struct zswap_tree *tree,
				   struct zswap_entry *entry)
{
	if (zswap_rb_erase(&tree->rbroot, entry))
		zswap_entry_put(tree, entry);
}

static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
				       spinlock_t *lock, void *arg)
{
	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
	bool *encountered_page_in_swapcache = (bool *)arg;
	struct zswap_tree *tree;
	pgoff_t swpoffset;
	enum lru_status ret = LRU_REMOVED_RETRY;
	int writeback_result;

	/*
	 * Once the lru lock is dropped, the entry might get freed. The
	 * swpoffset is copied to the stack, and entry isn't deref'd again
	 * until the entry is verified to still be alive in the tree.
	 */
	swpoffset = swp_offset(entry->swpentry);
	tree = zswap_trees[swp_type(entry->swpentry)];
	list_lru_isolate(l, item);
	/*
	 * It's safe to drop the lock here because we return either
	 * LRU_REMOVED_RETRY or LRU_RETRY.
	 */
	spin_unlock(lock);

	/* Check for invalidate() race */
	spin_lock(&tree->lock);
	if (entry != zswap_rb_search(&tree->rbroot, swpoffset))
		goto unlock;

	/* Hold a reference to prevent a free during writeback */
	zswap_entry_get(entry);
	spin_unlock(&tree->lock);

	writeback_result = zswap_writeback_entry(entry, tree);

	spin_lock(&tree->lock);
	if (writeback_result) {
		zswap_reject_reclaim_fail++;
		zswap_lru_putback(&entry->pool->list_lru, entry);
		ret = LRU_RETRY;

		/*
		 * Encountering a page already in swap cache is a sign that we are shrinking
		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
		 * shrinker context).
		 */
		if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
			ret = LRU_SKIP;
			*encountered_page_in_swapcache = true;
		}

		goto put_unlock;
	}
	zswap_written_back_pages++;

	if (entry->objcg)
		count_objcg_event(entry->objcg, ZSWPWB);

	count_vm_event(ZSWPWB);
	/*
	 * Writeback started successfully, the page now belongs to the
	 * swapcache. Drop the entry from zswap - unless invalidate already
	 * took it out while we had the tree->lock released for IO.
	 */
	zswap_invalidate_entry(tree, entry);

put_unlock:
	/* Drop local reference */
	zswap_entry_put(tree, entry);
unlock:
	spin_unlock(&tree->lock);
	spin_lock(lock);
	return ret;
}

static int shrink_memcg(struct mem_cgroup *memcg)
{
	struct zswap_pool *pool;
	int nid, shrunk = 0;

	/*
	 * Skip zombies because their LRUs are reparented and we would be
	 * reclaiming from the parent instead of the dead memcg.
	 */
	if (memcg && !mem_cgroup_online(memcg))
		return -ENOENT;

	pool = zswap_pool_current_get();
	if (!pool)
		return -EINVAL;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		unsigned long nr_to_walk = 1;

		shrunk += list_lru_walk_one(&pool->list_lru, nid, memcg,
					    &shrink_memcg_cb, NULL, &nr_to_walk);
	}
	zswap_pool_put(pool);
	return shrunk ? 0 : -EAGAIN;
}

static void shrink_worker(struct work_struct *w)
{
	struct zswap_pool *pool = container_of(w, typeof(*pool),
						shrink_work);
	struct mem_cgroup *memcg;
	int ret, failures = 0;

	/* global reclaim will select cgroup in a round-robin fashion. */
	do {
		spin_lock(&zswap_pools_lock);
		pool->next_shrink = mem_cgroup_iter(NULL, pool->next_shrink, NULL);
		memcg = pool->next_shrink;

		/*
		 * We need to retry if we have gone through a full round trip, or if we
		 * got an offline memcg (or else we risk undoing the effect of the
		 * zswap memcg offlining cleanup callback). This is not catastrophic
		 * per se, but it will keep the now offlined memcg hostage for a while.
		 *
		 * Note that if we got an online memcg, we will keep the extra
		 * reference in case the original reference obtained by mem_cgroup_iter
		 * is dropped by the zswap memcg offlining callback, ensuring that the
		 * memcg is not killed when we are reclaiming.
		 */
		if (!memcg) {
			spin_unlock(&zswap_pools_lock);
			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}

		if (!mem_cgroup_tryget_online(memcg)) {
			/* drop the reference from mem_cgroup_iter() */
			mem_cgroup_iter_break(NULL, memcg);
			pool->next_shrink = NULL;
			spin_unlock(&zswap_pools_lock);

			if (++failures == MAX_RECLAIM_RETRIES)
				break;

			goto resched;
		}
		spin_unlock(&zswap_pools_lock);

		ret = shrink_memcg(memcg);
		/* drop the extra reference */
		mem_cgroup_put(memcg);

		if (ret == -EINVAL)
			break;
		if (ret && ++failures == MAX_RECLAIM_RETRIES)
			break;

resched:
		cond_resched();
	} while (!zswap_can_accept());
	zswap_pool_put(pool);
}

static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
{
	int i;
	struct zswap_pool *pool;
	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
	int ret;

	if (!zswap_has_pool) {
		/* if either are unset, pool initialization failed, and we
		 * need both params to be set correctly before trying to
		 * create a pool.
		 */
		if (!strcmp(type, ZSWAP_PARAM_UNSET))
			return NULL;
		if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
			return NULL;
	}

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
		/* unique name for each pool specifically required by zsmalloc */
		snprintf(name, 38, "zswap%x",
			 atomic_inc_return(&zswap_pools_count));

		pool->zpools[i] = zpool_create_pool(type, name, gfp);
		if (!pool->zpools[i]) {
			pr_err("%s zpool not available\n", type);
			goto error;
		}
	}
	pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));

	strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));

	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
	if (!pool->acomp_ctx) {
		pr_err("percpu alloc failed\n");
		goto error;
	}

	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
				       &pool->node);
	if (ret)
		goto error;

	zswap_alloc_shrinker(pool);
	if (!pool->shrinker)
		goto error;

	pr_debug("using %s compressor\n", pool->tfm_name);

	/* being the current pool takes 1 ref; this func expects the
	 * caller to always add the new pool as the current pool
	 */
	kref_init(&pool->kref);
	INIT_LIST_HEAD(&pool->list);
	if (list_lru_init_memcg(&pool->list_lru, pool->shrinker))
		goto lru_fail;
	shrinker_register(pool->shrinker);
	INIT_WORK(&pool->shrink_work, shrink_worker);
	atomic_set(&pool->nr_stored, 0);

	zswap_pool_debug("created", pool);

	return pool;

lru_fail:
	list_lru_destroy(&pool->list_lru);
	shrinker_free(pool->shrinker);
error:
	if (pool->acomp_ctx)
		free_percpu(pool->acomp_ctx);
	while (i--)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
	return NULL;
}

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
	bool has_comp, has_zpool;

	has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	if (!has_comp && strcmp(zswap_compressor,
				CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
		pr_err("compressor %s not available, using default %s\n",
		       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
		param_free_charp(&zswap_compressor);
		zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
		has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
	}
	if (!has_comp) {
		pr_err("default compressor %s not available\n",
		       zswap_compressor);
		param_free_charp(&zswap_compressor);
		zswap_compressor = ZSWAP_PARAM_UNSET;
	}

	has_zpool = zpool_has_pool(zswap_zpool_type);
	if (!has_zpool && strcmp(zswap_zpool_type,
				 CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
		pr_err("zpool %s not available, using default %s\n",
		       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
		has_zpool = zpool_has_pool(zswap_zpool_type);
	}
	if (!has_zpool) {
		pr_err("default zpool %s not available\n",
		       zswap_zpool_type);
		param_free_charp(&zswap_zpool_type);
		zswap_zpool_type = ZSWAP_PARAM_UNSET;
	}

	if (!has_comp || !has_zpool)
		return NULL;

	return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}

static void zswap_pool_destroy(struct zswap_pool *pool)
{
	int i;

	zswap_pool_debug("destroying", pool);

	shrinker_free(pool->shrinker);
	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
	free_percpu(pool->acomp_ctx);
	list_lru_destroy(&pool->list_lru);

	spin_lock(&zswap_pools_lock);
	mem_cgroup_iter_break(NULL, pool->next_shrink);
	pool->next_shrink = NULL;
	spin_unlock(&zswap_pools_lock);

	for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
		zpool_destroy_pool(pool->zpools[i]);
	kfree(pool);
}

static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
	if (!pool)
		return 0;

	return kref_get_unless_zero(&pool->kref);
}

static void __zswap_pool_release(struct work_struct *work)
{
	struct zswap_pool *pool = container_of(work, typeof(*pool),
						release_work);

	synchronize_rcu();

	/* nobody should have been able to get a kref... */
	WARN_ON(kref_get_unless_zero(&pool->kref));

	/* pool is now off zswap_pools list and has no references. */
	zswap_pool_destroy(pool);
}

static void __zswap_pool_empty(struct kref *kref)
{
	struct zswap_pool *pool;

	pool = container_of(kref, typeof(*pool), kref);

	spin_lock(&zswap_pools_lock);

	WARN_ON(pool == zswap_pool_current());

	list_del_rcu(&pool->list);

	INIT_WORK(&pool->release_work, __zswap_pool_release);
	schedule_work(&pool->release_work);

	spin_unlock(&zswap_pools_lock);
}

static void zswap_pool_put(struct zswap_pool *pool)
{
	kref_put(&pool->kref, __zswap_pool_empty);
}

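/*
 * Illustrative note (not part of zswap): the teardown above is the classic
 * kref + RCU pattern. The final kref_put() runs __zswap_pool_empty(), which
 * only unlinks the pool from the RCU-protected list and defers destruction
 * to a workqueue; __zswap_pool_release() then calls synchronize_rcu() so
 * that any reader still walking zswap_pools via list_for_each_entry_rcu()
 * has finished before zswap_pool_destroy() frees the memory.
 */
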
/*********************************
* param callbacks
**********************************/

static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
{
	/* no change required */
	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
		return false;
	return true;
}

/* val must be a null-terminated string */
static int __zswap_param_set(const char *val, const struct kernel_param *kp,
			     char *type, char *compressor)
{
	struct zswap_pool *pool, *put_pool = NULL;
	char *s = strstrip((char *)val);
	int ret = 0;
	bool new_pool = false;

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		/* if this is load-time (pre-init) param setting,
		 * don't create a pool; that's done during init.
		 */
		ret = param_set_charp(s, kp);
		break;
	case ZSWAP_INIT_SUCCEED:
		new_pool = zswap_pool_changed(s, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't set param, initialization failed\n");
		ret = -ENODEV;
	}
	mutex_unlock(&zswap_init_lock);

	/* no need to create a new pool, return directly */
	if (!new_pool)
		return ret;

	if (!type) {
		if (!zpool_has_pool(s)) {
			pr_err("zpool %s not available\n", s);
			return -ENOENT;
		}
		type = s;
	} else if (!compressor) {
		if (!crypto_has_acomp(s, 0, 0)) {
			pr_err("compressor %s not available\n", s);
			return -ENOENT;
		}
		compressor = s;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	spin_lock(&zswap_pools_lock);

	pool = zswap_pool_find_get(type, compressor);
	if (pool) {
		zswap_pool_debug("using existing", pool);
		WARN_ON(pool == zswap_pool_current());
		list_del_rcu(&pool->list);
	}

	spin_unlock(&zswap_pools_lock);

	if (!pool)
		pool = zswap_pool_create(type, compressor);

	if (pool)
		ret = param_set_charp(s, kp);
	else
		ret = -EINVAL;

	spin_lock(&zswap_pools_lock);

	if (!ret) {
		put_pool = zswap_pool_current();
		list_add_rcu(&pool->list, &zswap_pools);
		zswap_has_pool = true;
	} else if (pool) {
		/* add the possibly pre-existing pool to the end of the pools
		 * list; if it's new (and empty) then it'll be removed and
		 * destroyed by the put after we drop the lock
		 */
		list_add_tail_rcu(&pool->list, &zswap_pools);
		put_pool = pool;
	}

	spin_unlock(&zswap_pools_lock);

	if (!zswap_has_pool && !pool) {
		/* if initial pool creation failed, and this pool creation also
		 * failed, maybe both compressor and zpool params were bad.
		 * Allow changing this param, so pool creation will succeed
		 * when the other param is changed. We already verified this
		 * param is ok in the zpool_has_pool() or crypto_has_acomp()
		 * checks above.
		 */
		ret = param_set_charp(s, kp);
	}

	/* drop the ref from either the old current pool,
	 * or the new pool we failed to add
	 */
	if (put_pool)
		zswap_pool_put(put_pool);

	return ret;
}

static int zswap_compressor_param_set(const char *val,
				      const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
}

static int zswap_zpool_param_set(const char *val,
				 const struct kernel_param *kp)
{
	return __zswap_param_set(val, kp, NULL, zswap_compressor);
}

static int zswap_enabled_param_set(const char *val,
				   const struct kernel_param *kp)
{
	int ret = -ENODEV;

	/* if this is load-time (pre-init) param setting, only set param. */
	if (system_state != SYSTEM_RUNNING)
		return param_set_bool(val, kp);

	mutex_lock(&zswap_init_lock);
	switch (zswap_init_state) {
	case ZSWAP_UNINIT:
		if (zswap_setup())
			break;
		fallthrough;
	case ZSWAP_INIT_SUCCEED:
		if (!zswap_has_pool)
			pr_err("can't enable, no pool configured\n");
		else
			ret = param_set_bool(val, kp);
		break;
	case ZSWAP_INIT_FAILED:
		pr_err("can't enable, initialization failed\n");
	}
	mutex_unlock(&zswap_init_lock);

	return ret;
}

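/*
 * Illustrative usage (not part of the file): since the callbacks above are
 * wired up via module_param_cb() with mode 0644, the compressor, zpool and
 * enabled state can be switched at runtime from userspace, e.g.:
 *
 *	echo lz4 > /sys/module/zswap/parameters/compressor
 *	echo zsmalloc > /sys/module/zswap/parameters/zpool
 *	echo Y > /sys/module/zswap/parameters/enabled
 *
 * A successful write creates (or reuses) a matching zswap_pool and rotates
 * it in as the current pool; the old pool stays on zswap_pools until its
 * last reference is put.
 */
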
static void __zswap_load(struct zswap_entry *entry, struct page *page)
{
	struct zpool *zpool = zswap_find_zpool(entry);
	struct scatterlist input, output;
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
	if (!zpool_can_sleep_mapped(zpool)) {
		memcpy(acomp_ctx->buffer, src, entry->length);
		src = acomp_ctx->buffer;
		zpool_unmap_handle(zpool, entry->handle);
	}

	sg_init_one(&input, src, entry->length);
	sg_init_table(&output, 1);
	sg_set_page(&output, page, PAGE_SIZE, 0);
	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
	mutex_unlock(&acomp_ctx->mutex);

	if (zpool_can_sleep_mapped(zpool))
		zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted by zswap_store()
 * in the first place. After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zswap_entry *entry,
				 struct zswap_tree *tree)
{
	swp_entry_t swpentry = entry->swpentry;
	struct page *page;
	struct mempolicy *mpol;
	bool page_was_allocated;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* try to allocate swap cache page */
	mpol = get_task_policy(current);
	page = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
				NO_INTERLEAVE_INDEX, &page_was_allocated, true);
	if (!page)
		return -ENOMEM;

	/*
	 * Found an existing page, we raced with load/swapin. We generally
	 * writeback cold pages from zswap, and swapin means the page just
	 * became hot. Skip this page and let the caller find another one.
	 */
	if (!page_was_allocated) {
		put_page(page);
		return -EEXIST;
	}

	/*
	 * Page is locked, and the swapcache is now secured against
	 * concurrent swapping to and from the slot. Verify that the
	 * swap entry hasn't been invalidated and recycled behind our
	 * backs (our zswap_entry reference doesn't prevent that), to
	 * avoid overwriting a new swap page with old compressed data.
	 */
	spin_lock(&tree->lock);
	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
		spin_unlock(&tree->lock);
		delete_from_swap_cache(page_folio(page));
		return -ENOMEM;
	}
	spin_unlock(&tree->lock);

	__zswap_load(entry, page);

	/* page is up to date */
	SetPageUptodate(page);

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc);
	put_page(page);

	return 0;
}

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return 0;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return 0;
	}

	*value = val;

	return 1;
}

static void zswap_fill_page(void *ptr, unsigned long value)
{
	unsigned long *page;

	page = (unsigned long *)ptr;
	memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}

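/*
 * Illustrative note (not part of zswap): a page consisting entirely of the
 * repeating word 0x0101010101010101 passes zswap_is_page_same_filled(), so
 * only that single word is kept (entry->value, with entry->length == 0)
 * instead of a compressed copy; zswap_fill_page() later reconstructs the
 * page with memset_l(). Checking page[last_pos] before the full scan
 * cheaply rejects most non-uniform pages.
 */
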
34f4c198 1485bool zswap_store(struct folio *folio)
2b281117 1486{
3d2c9087 1487 swp_entry_t swp = folio->swap;
42c06a0e
JW
1488 int type = swp_type(swp);
1489 pgoff_t offset = swp_offset(swp);
34f4c198 1490 struct page *page = &folio->page;
2b281117
SJ
1491 struct zswap_tree *tree = zswap_trees[type];
1492 struct zswap_entry *entry, *dupentry;
1ec3b5fe
BS
1493 struct scatterlist input, output;
1494 struct crypto_acomp_ctx *acomp_ctx;
f4840ccf 1495 struct obj_cgroup *objcg = NULL;
a65b0e76 1496 struct mem_cgroup *memcg = NULL;
f4840ccf 1497 struct zswap_pool *pool;
b8cf32dc 1498 struct zpool *zpool;
0bb48849 1499 unsigned int dlen = PAGE_SIZE;
a85f878b 1500 unsigned long handle, value;
2b281117
SJ
1501 char *buf;
1502 u8 *src, *dst;
d2fcd82b 1503 gfp_t gfp;
42c06a0e
JW
1504 int ret;
1505
34f4c198
MWO
1506 VM_WARN_ON_ONCE(!folio_test_locked(folio));
1507 VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
2b281117 1508
34f4c198
MWO
1509 /* Large folios aren't supported */
1510 if (folio_test_large(folio))
42c06a0e 1511 return false;
7ba71669 1512
42c06a0e
JW
1513 if (!zswap_enabled || !tree)
1514 return false;
2b281117 1515
ca56489c
DC
1516 /*
1517 * If this is a duplicate, it must be removed before attempting to store
1518 * it, otherwise, if the store fails the old page won't be removed from
1519 * the tree, and it might be written back overriding the new data.
1520 */
1521 spin_lock(&tree->lock);
1522 dupentry = zswap_rb_search(&tree->rbroot, offset);
1523 if (dupentry) {
1524 zswap_duplicate_entry++;
1525 zswap_invalidate_entry(tree, dupentry);
1526 }
1527 spin_unlock(&tree->lock);
074e3e26 1528 objcg = get_obj_cgroup_from_folio(folio);
a65b0e76
DC
1529 if (objcg && !obj_cgroup_may_zswap(objcg)) {
1530 memcg = get_mem_cgroup_from_objcg(objcg);
1531 if (shrink_memcg(memcg)) {
1532 mem_cgroup_put(memcg);
1533 goto reject;
1534 }
1535 mem_cgroup_put(memcg);
1536 }
f4840ccf 1537
2b281117
SJ
1538 /* reclaim space if needed */
1539 if (zswap_is_full()) {
1540 zswap_pool_limit_hit++;
45190f01 1541 zswap_pool_reached_full = true;
f4840ccf 1542 goto shrink;
45190f01 1543 }
16e536ef 1544
45190f01 1545 if (zswap_pool_reached_full) {
42c06a0e 1546 if (!zswap_can_accept())
e0228d59 1547 goto shrink;
42c06a0e 1548 else
45190f01 1549 zswap_pool_reached_full = false;
2b281117
SJ
1550 }
1551
1552 /* allocate entry */
a65b0e76 1553 entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page));
2b281117
SJ
1554 if (!entry) {
1555 zswap_reject_kmemcache_fail++;
2b281117
SJ
1556 goto reject;
1557 }
1558
a85f878b 1559 if (zswap_same_filled_pages_enabled) {
003ae2fb 1560 src = kmap_local_page(page);
a85f878b 1561 if (zswap_is_page_same_filled(src, &value)) {
003ae2fb 1562 kunmap_local(src);
0bb48849 1563 entry->swpentry = swp_entry(type, offset);
a85f878b
SD
1564 entry->length = 0;
1565 entry->value = value;
1566 atomic_inc(&zswap_same_filled_pages);
1567 goto insert_entry;
1568 }
003ae2fb 1569 kunmap_local(src);
a85f878b
SD
1570 }
1571
42c06a0e 1572 if (!zswap_non_same_filled_pages_enabled)
cb325ddd 1573 goto freepage;
cb325ddd 1574
f1c54846
DS
1575 /* if entry is successfully added, it keeps the reference */
1576 entry->pool = zswap_pool_current_get();
42c06a0e 1577 if (!entry->pool)
f1c54846 1578 goto freepage;
f1c54846 1579
a65b0e76
DC
1580 if (objcg) {
1581 memcg = get_mem_cgroup_from_objcg(objcg);
1582 if (memcg_list_lru_alloc(memcg, &entry->pool->list_lru, GFP_KERNEL)) {
1583 mem_cgroup_put(memcg);
1584 goto put_pool;
1585 }
1586 mem_cgroup_put(memcg);
1587 }
1588
2b281117 1589 /* compress */
1ec3b5fe
BS
1590 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1591
8ba2f844 1592 mutex_lock(&acomp_ctx->mutex);
1ec3b5fe 1593
8ba2f844 1594 dst = acomp_ctx->buffer;
1ec3b5fe
BS
1595 sg_init_table(&input, 1);
1596 sg_set_page(&input, page, PAGE_SIZE, 0);
1597
8ba2f844
CZ
1598 /*
1599 * We need PAGE_SIZE * 2 here since there maybe over-compression case,
1600 * and hardware-accelerators may won't check the dst buffer size, so
1601 * giving the dst buffer with enough length to avoid buffer overflow.
1602 */
1ec3b5fe
BS
1603 sg_init_one(&output, dst, PAGE_SIZE * 2);
1604 acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
1605 /*
1606 * it maybe looks a little bit silly that we send an asynchronous request,
1607 * then wait for its completion synchronously. This makes the process look
1608 * synchronous in fact.
1609 * Theoretically, acomp supports users send multiple acomp requests in one
1610 * acomp instance, then get those requests done simultaneously. but in this
42c06a0e 1611 * case, zswap actually does store and load page by page, there is no
1ec3b5fe 1612 * existing method to send the second page before the first page is done
42c06a0e 1613 * in one thread doing zwap.
1ec3b5fe
BS
1614 * but in different threads running on different cpu, we have different
1615 * acomp instance, so multiple threads can do (de)compression in parallel.
1616 */
1617 ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
1618 dlen = acomp_ctx->req->dlen;
1619
cb61dad8
NP
1620 if (ret) {
1621 zswap_reject_compress_fail++;
f1c54846 1622 goto put_dstmem;
cb61dad8 1623 }
2b281117
SJ
1624
1625 /* store */
b8cf32dc 1626 zpool = zswap_find_zpool(entry);
d2fcd82b 1627 gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
b8cf32dc 1628 if (zpool_malloc_support_movable(zpool))
d2fcd82b 1629 gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
b8cf32dc 1630 ret = zpool_malloc(zpool, dlen, gfp, &handle);
2b281117
SJ
1631 if (ret == -ENOSPC) {
1632 zswap_reject_compress_poor++;
f1c54846 1633 goto put_dstmem;
2b281117
SJ
1634 }
1635 if (ret) {
1636 zswap_reject_alloc_fail++;
f1c54846 1637 goto put_dstmem;
2b281117 1638 }
b8cf32dc 1639 buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
0bb48849 1640 memcpy(buf, dst, dlen);
b8cf32dc 1641 zpool_unmap_handle(zpool, handle);
8ba2f844 1642 mutex_unlock(&acomp_ctx->mutex);
2b281117
SJ
1643
1644 /* populate entry */
0bb48849 1645 entry->swpentry = swp_entry(type, offset);
2b281117
SJ
1646 entry->handle = handle;
1647 entry->length = dlen;
1648
a85f878b 1649insert_entry:
f4840ccf
JW
1650 entry->objcg = objcg;
1651 if (objcg) {
1652 obj_cgroup_charge_zswap(objcg, entry->length);
1653 /* Account before objcg ref is moved to tree */
1654 count_objcg_event(objcg, ZSWPOUT);
1655 }
1656
2b281117
SJ
1657 /* map */
1658 spin_lock(&tree->lock);
ca56489c
DC
1659 /*
1660 * A duplicate entry should have been removed at the beginning of this
1661 * function. Since the swap entry should be pinned, if a duplicate is
1662 * found again here it means that something went wrong in the swap
1663 * cache.
1664 */
42c06a0e 1665 while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
ca56489c 1666 WARN_ON(1);
42c06a0e 1667 zswap_duplicate_entry++;
56c67049 1668 zswap_invalidate_entry(tree, dupentry);
42c06a0e 1669 }
35499e2b 1670 if (entry->length) {
a65b0e76
DC
1671 INIT_LIST_HEAD(&entry->lru);
1672 zswap_lru_add(&entry->pool->list_lru, entry);
b5ba474f 1673 atomic_inc(&entry->pool->nr_stored);
f999f38b 1674 }
2b281117
SJ
1675 spin_unlock(&tree->lock);
1676
1677 /* update stats */
1678 atomic_inc(&zswap_stored_pages);
f1c54846 1679 zswap_update_total_size();
f6498b77 1680 count_vm_event(ZSWPOUT);
2b281117 1681
42c06a0e 1682 return true;
2b281117 1683
f1c54846 1684put_dstmem:
8ba2f844 1685 mutex_unlock(&acomp_ctx->mutex);
a65b0e76 1686put_pool:
f1c54846
DS
1687 zswap_pool_put(entry->pool);
1688freepage:
2b281117
SJ
1689 zswap_entry_cache_free(entry);
1690reject:
f4840ccf
JW
1691 if (objcg)
1692 obj_cgroup_put(objcg);
42c06a0e 1693 return false;
f4840ccf
JW
1694
1695shrink:
1696 pool = zswap_pool_last_get();
969d63e1
JW
1697 if (pool && !queue_work(shrink_wq, &pool->shrink_work))
1698 zswap_pool_put(pool);
f4840ccf 1699 goto reject;
2b281117
SJ
1700}
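/*
 * A minimal sketch of the expected caller (modeled on swap_writepage()
 * in mm/page_io.c of this era; illustrative, not verbatim kernel code):
 * a successful store keeps the compressed copy in RAM and completes
 * writeback without touching the swap device.
 *
 *	if (zswap_store(folio)) {
 *		folio_start_writeback(folio);
 *		folio_unlock(folio);
 *		folio_end_writeback(folio);
 *		return 0;
 *	}
 *	__swap_writepage(&folio->page, wbc);
 */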
1701
ca54f6d8 1702bool zswap_load(struct folio *folio)
2b281117 1703{
3d2c9087 1704 swp_entry_t swp = folio->swap;
42c06a0e
JW
1705 int type = swp_type(swp);
1706 pgoff_t offset = swp_offset(swp);
ca54f6d8 1707 struct page *page = &folio->page;
2b281117
SJ
1708 struct zswap_tree *tree = zswap_trees[type];
1709 struct zswap_entry *entry;
32acba4c 1710 u8 *dst;
42c06a0e 1711
ca54f6d8 1712 VM_WARN_ON_ONCE(!folio_test_locked(folio));
2b281117
SJ
1713
1714 /* find */
1715 spin_lock(&tree->lock);
0ab0abcf 1716 entry = zswap_entry_find_get(&tree->rbroot, offset);
2b281117 1717 if (!entry) {
2b281117 1718 spin_unlock(&tree->lock);
42c06a0e 1719 return false;
2b281117 1720 }
2b281117
SJ
1721 spin_unlock(&tree->lock);
1722
66447fd0
CZ
1723 if (entry->length)
1724 __zswap_load(entry, page);
1725 else {
003ae2fb 1726 dst = kmap_local_page(page);
a85f878b 1727 zswap_fill_page(dst, entry->value);
003ae2fb 1728 kunmap_local(dst);
a85f878b
SD
1729 }
1730
f6498b77 1731 count_vm_event(ZSWPIN);
f4840ccf
JW
1732 if (entry->objcg)
1733 count_objcg_event(entry->objcg, ZSWPIN);
c75f5c1e 1734
2b281117 1735 spin_lock(&tree->lock);
66447fd0 1736 if (zswap_exclusive_loads_enabled) {
b9c91c43 1737 zswap_invalidate_entry(tree, entry);
ca54f6d8 1738 folio_mark_dirty(folio);
35499e2b 1739 } else if (entry->length) {
a65b0e76
DC
1740 zswap_lru_del(&entry->pool->list_lru, entry);
1741 zswap_lru_add(&entry->pool->list_lru, entry);
b9c91c43 1742 }
18a93707 1743 zswap_entry_put(tree, entry);
2b281117
SJ
1744 spin_unlock(&tree->lock);
1745
66447fd0 1746 return true;
2b281117
SJ
1747}
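/*
 * A minimal sketch of the read-side caller (modeled on swap_readpage()
 * in mm/page_io.c of this era; illustrative, not verbatim): a zswap hit
 * completes the read entirely from RAM.
 *
 *	if (zswap_load(folio)) {
 *		folio_mark_uptodate(folio);
 *		folio_unlock(folio);
 *	} else {
 *		// submit the bio to the swap device
 *	}
 */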
1748
42c06a0e 1749void zswap_invalidate(int type, pgoff_t offset)
2b281117
SJ
1750{
1751 struct zswap_tree *tree = zswap_trees[type];
1752 struct zswap_entry *entry;
2b281117
SJ
1753
1754 /* find */
1755 spin_lock(&tree->lock);
1756 entry = zswap_rb_search(&tree->rbroot, offset);
1757 if (!entry) {
1758 /* entry was written back */
1759 spin_unlock(&tree->lock);
1760 return;
1761 }
b9c91c43 1762 zswap_invalidate_entry(tree, entry);
2b281117 1763 spin_unlock(&tree->lock);
2b281117
SJ
1764}
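/*
 * zswap_invalidate() is expected to run when a swap slot is freed (the
 * slot-freeing path in mm/swapfile.c, hedged), so a reused slot can
 * never resurface a stale compressed copy. A sketch of the call:
 *
 *	zswap_invalidate(si->type, offset);
 */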
1765
42c06a0e
JW
1766void zswap_swapon(int type)
1767{
1768 struct zswap_tree *tree;
1769
1770 tree = kzalloc(sizeof(*tree), GFP_KERNEL);
1771 if (!tree) {
1772 pr_err("alloc failed, zswap disabled for swap type %d\n", type);
1773 return;
1774 }
1775
1776 tree->rbroot = RB_ROOT;
1777 spin_lock_init(&tree->lock);
1778 zswap_trees[type] = tree;
1779}
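/*
 * Called from the swapon path (enable_swap_info() in mm/swapfile.c in
 * this era, hedged) with the new device's swap type, e.g.:
 *
 *	zswap_swapon(p->type);
 *
 * On allocation failure zswap_trees[type] presumably stays NULL and
 * stores for this device fall back to the swap device.
 */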
1780
1781void zswap_swapoff(int type)
2b281117
SJ
1782{
1783 struct zswap_tree *tree = zswap_trees[type];
0bd42136 1784 struct zswap_entry *entry, *n;
2b281117
SJ
1785
1786 if (!tree)
1787 return;
1788
1789 /* walk the tree and free everything */
1790 spin_lock(&tree->lock);
0ab0abcf 1791 rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
60105e12 1792 zswap_free_entry(entry);
2b281117
SJ
1793 tree->rbroot = RB_ROOT;
1794 spin_unlock(&tree->lock);
aa9bca05
WY
1795 kfree(tree);
1796 zswap_trees[type] = NULL;
2b281117
SJ
1797}
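/*
 * zswap_swapoff() runs from the swapoff path after all users of the
 * device are gone, so walking and freeing the whole tree under the
 * tree lock cannot race with loads or stores (an assumption about the
 * caller, not enforced here).
 */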
1798
2b281117
SJ
1799/*********************************
1800* debugfs functions
1801**********************************/
1802#ifdef CONFIG_DEBUG_FS
1803#include <linux/debugfs.h>
1804
1805static struct dentry *zswap_debugfs_root;
1806
141fdeec 1807static int zswap_debugfs_init(void)
2b281117
SJ
1808{
1809 if (!debugfs_initialized())
1810 return -ENODEV;
1811
1812 zswap_debugfs_root = debugfs_create_dir("zswap", NULL);
2b281117 1813
0825a6f9
JP
1814 debugfs_create_u64("pool_limit_hit", 0444,
1815 zswap_debugfs_root, &zswap_pool_limit_hit);
1816 debugfs_create_u64("reject_reclaim_fail", 0444,
1817 zswap_debugfs_root, &zswap_reject_reclaim_fail);
1818 debugfs_create_u64("reject_alloc_fail", 0444,
1819 zswap_debugfs_root, &zswap_reject_alloc_fail);
1820 debugfs_create_u64("reject_kmemcache_fail", 0444,
1821 zswap_debugfs_root, &zswap_reject_kmemcache_fail);
cb61dad8
NP
1822 debugfs_create_u64("reject_compress_fail", 0444,
1823 zswap_debugfs_root, &zswap_reject_compress_fail);
0825a6f9
JP
1824 debugfs_create_u64("reject_compress_poor", 0444,
1825 zswap_debugfs_root, &zswap_reject_compress_poor);
1826 debugfs_create_u64("written_back_pages", 0444,
1827 zswap_debugfs_root, &zswap_written_back_pages);
1828 debugfs_create_u64("duplicate_entry", 0444,
1829 zswap_debugfs_root, &zswap_duplicate_entry);
1830 debugfs_create_u64("pool_total_size", 0444,
1831 zswap_debugfs_root, &zswap_pool_total_size);
1832 debugfs_create_atomic_t("stored_pages", 0444,
1833 zswap_debugfs_root, &zswap_stored_pages);
a85f878b 1834 debugfs_create_atomic_t("same_filled_pages", 0444,
0825a6f9 1835 zswap_debugfs_root, &zswap_same_filled_pages);
2b281117
SJ
1836
1837 return 0;
1838}
2b281117 1839#else
141fdeec 1840static int zswap_debugfs_init(void)
2b281117
SJ
1841{
1842 return 0;
1843}
2b281117
SJ
1844#endif
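/*
 * With CONFIG_DEBUG_FS, the counters registered above are readable
 * under /sys/kernel/debug/zswap/ (assuming debugfs is mounted at the
 * usual location), e.g.:
 *
 *	# cat /sys/kernel/debug/zswap/pool_total_size
 *	# cat /sys/kernel/debug/zswap/reject_compress_fail
 */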
1845
1846/*********************************
1847* module init and exit
1848**********************************/
141fdeec 1849static int zswap_setup(void)
2b281117 1850{
f1c54846 1851 struct zswap_pool *pool;
ad7ed770 1852 int ret;
60105e12 1853
b7919122
LS
1854 zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
1855 if (!zswap_entry_cache) {
2b281117 1856 pr_err("entry cache creation failed\n");
f1c54846 1857 goto cache_fail;
2b281117 1858 }
f1c54846 1859
cab7a7e5
SAS
1860 ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
1861 "mm/zswap_pool:prepare",
1862 zswap_cpu_comp_prepare,
1863 zswap_cpu_comp_dead);
1864 if (ret)
1865 goto hp_fail;
1866
f1c54846 1867 pool = __zswap_pool_create_fallback();
ae3d89a7
DS
1868 if (pool) {
1869 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
b8cf32dc 1870 zpool_get_type(pool->zpools[0]));
ae3d89a7
DS
1871 list_add(&pool->list, &zswap_pools);
1872 zswap_has_pool = true;
1873 } else {
f1c54846 1874 pr_err("pool creation failed\n");
ae3d89a7 1875 zswap_enabled = false;
2b281117 1876 }
60105e12 1877
45190f01
VW
1878 shrink_wq = create_workqueue("zswap-shrink");
1879 if (!shrink_wq)
1880 goto fallback_fail;
1881
2b281117
SJ
1882 if (zswap_debugfs_init())
1883 pr_warn("debugfs initialization failed\n");
9021ccec 1884 zswap_init_state = ZSWAP_INIT_SUCCEED;
2b281117 1885 return 0;
f1c54846 1886
45190f01 1887fallback_fail:
38aeb071
DC
1888 if (pool)
1889 zswap_pool_destroy(pool);
cab7a7e5 1890hp_fail:
b7919122 1891 kmem_cache_destroy(zswap_entry_cache);
f1c54846 1892cache_fail:
d7b028f5 1893 /* if built-in, we aren't unloaded on failure; don't allow use */
9021ccec 1894 zswap_init_state = ZSWAP_INIT_FAILED;
d7b028f5 1895 zswap_enabled = false;
2b281117
SJ
1896 return -ENOMEM;
1897}
141fdeec
LS
1898
1899static int __init zswap_init(void)
1900{
1901 if (!zswap_enabled)
1902 return 0;
1903 return zswap_setup();
1904}
2b281117 1905/* must be late so crypto has time to come up */
141fdeec 1906late_initcall(zswap_init);
2b281117 1907
68386da8 1908MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
2b281117 1909MODULE_DESCRIPTION("Compressed cache for swap pages");