// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
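
/*
 * Helpers shared across EROFS: a minimal page-pool allocator and, under
 * CONFIG_EROFS_FS_ZIP, the workgroup lifetime machinery plus its memory
 * shrinker.
 */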

struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{
	struct page *page = *pagepool;

	if (page) {
		DBG_BUGON(page_ref_count(page) != 1);
		*pagepool = (struct page *)page_private(page);
	} else {
		page = alloc_page(gfp);
	}
	return page;
}

void erofs_release_pages(struct page **pagepool)
{
	while (*pagepool) {
		struct page *page = *pagepool;

		*pagepool = (struct page *)page_private(page);
		put_page(page);
	}
}
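
/*
 * Usage sketch (hypothetical caller, for illustration only): free pages
 * are chained through page->private, so a pool is built and drained as:
 *
 *	struct page *pagepool = NULL, *page;
 *
 *	page = erofs_allocpage(&pagepool, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	(recycling pushes a page back onto the pool the same way:)
 *	set_page_private(page, (unsigned long)pagepool);
 *	pagepool = page;
 *	...
 *	erofs_release_pages(&pagepool);
 */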

#ifdef CONFIG_EROFS_FS_ZIP
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;
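
/*
 * A workgroup's lockref count drops to zero once it becomes idle, and
 * idle workgroups are effectively what erofs_global_shrink_cnt tracks.
 * Taking a reference on an idle group therefore removes it from the
 * reclaimable population (see the matching increment in
 * erofs_workgroup_put() below).
 */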

static bool erofs_workgroup_get(struct erofs_workgroup *grp)
{
	if (lockref_get_not_zero(&grp->lockref))
		return true;

	spin_lock(&grp->lockref.lock);
	if (__lockref_is_dead(&grp->lockref)) {
		spin_unlock(&grp->lockref.lock);
		return false;
	}

	if (!grp->lockref.count++)
		atomic_long_dec(&erofs_global_shrink_cnt);
	spin_unlock(&grp->lockref.lock);
	return true;
}
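
/*
 * Lookup runs under the RCU read lock: if the group found in
 * sbi->managed_pslots is already dying (its lockref is dead), the read
 * side is dropped and the lookup retried so that a freshly inserted
 * replacement can be observed.
 */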

struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;

repeat:
	rcu_read_lock();
	grp = xa_load(&sbi->managed_pslots, index);
	if (grp) {
		if (!erofs_workgroup_get(grp)) {
			/* prefer to relax the RCU read side */
			rcu_read_unlock();
			goto repeat;
		}

		DBG_BUGON(index != grp->index);
	}
	rcu_read_unlock();
	return grp;
}
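
/*
 * Insert @grp into the managed-pslots XArray. If another group is
 * already registered at the same index, a reference is taken on the
 * in-tree one and that group is returned instead (or an ERR_PTR on
 * XArray failure).
 */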

struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	struct erofs_workgroup *pre;

	DBG_BUGON(grp->lockref.count < 1);
repeat:
	xa_lock(&sbi->managed_pslots);
	pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
			   NULL, grp, GFP_KERNEL);
	if (pre) {
		if (xa_is_err(pre)) {
			pre = ERR_PTR(xa_err(pre));
		} else if (!erofs_workgroup_get(pre)) {
			/* try to legitimize the current in-tree one */
			xa_unlock(&sbi->managed_pslots);
			cond_resched();
			goto repeat;
		}
		grp = pre;
	}
	xa_unlock(&sbi->managed_pslots);
	return grp;
}

static void __erofs_workgroup_free(struct erofs_workgroup *grp)
{
	atomic_long_dec(&erofs_global_shrink_cnt);
	erofs_workgroup_free_rcu(grp);
}
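
/*
 * Dropping the last reference does not free the workgroup immediately;
 * it only makes the group idle again and re-adds it to the global shrink
 * count, leaving the actual teardown to the shrinker path below.
 */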

void erofs_workgroup_put(struct erofs_workgroup *grp)
{
	if (lockref_put_or_lock(&grp->lockref))
		return;

	DBG_BUGON(__lockref_is_dead(&grp->lockref));
	if (grp->lockref.count == 1)
		atomic_long_inc(&erofs_global_shrink_cnt);
	--grp->lockref.count;
	spin_unlock(&grp->lockref.lock);
}

static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
					   struct erofs_workgroup *grp)
{
	int free = false;

	spin_lock(&grp->lockref.lock);
	if (grp->lockref.count)
		goto out;

	/*
	 * Note that all cached pages should be detached before being
	 * deleted from the XArray; otherwise some cached pages could
	 * still be attached to the orphan old workgroup while the new
	 * one is available in the tree.
	 */
	if (erofs_try_to_free_all_cached_pages(sbi, grp))
		goto out;

	/*
	 * It's impossible to fail after the workgroup is frozen, but
	 * in order to avoid some race conditions, add a DBG_BUGON to
	 * observe this in advance.
	 */
	DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);

	lockref_mark_dead(&grp->lockref);
	free = true;
out:
	spin_unlock(&grp->lockref.lock);
	if (free)
		__erofs_workgroup_free(grp);
	return free;
}
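
/*
 * Walk the whole XArray and release up to @nr_shrink idle workgroups.
 * sbi->managed_pslots stays locked across each
 * erofs_try_to_release_workgroup() call since the latter erases entries
 * with __xa_erase(); the lock is dropped and retaken between iterations
 * once a group has actually been freed.
 */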

static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
					      unsigned long nr_shrink)
{
	struct erofs_workgroup *grp;
	unsigned int freed = 0;
	unsigned long index;

	xa_lock(&sbi->managed_pslots);
	xa_for_each(&sbi->managed_pslots, index, grp) {
		/* try to shrink each valid workgroup */
		if (!erofs_try_to_release_workgroup(sbi, grp))
			continue;
		xa_unlock(&sbi->managed_pslots);

		++freed;
		if (!--nr_shrink)
			return freed;
		xa_lock(&sbi->managed_pslots);
	}
	xa_unlock(&sbi->managed_pslots);
	return freed;
}

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);

void erofs_shrinker_register(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	mutex_init(&sbi->umount_mutex);

	spin_lock(&erofs_sb_list_lock);
	list_add(&sbi->list, &erofs_sb_list);
	spin_unlock(&erofs_sb_list_lock);
}

void erofs_shrinker_unregister(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	mutex_lock(&sbi->umount_mutex);
	/* clean up all remaining workgroups in memory */
	erofs_shrink_workstation(sbi, ~0UL);

	spin_lock(&erofs_sb_list_lock);
	list_del(&sbi->list);
	spin_unlock(&erofs_sb_list_lock);
	mutex_unlock(&sbi->umount_mutex);
}
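
/*
 * Shrinker contract (core MM): ->count_objects() returns an estimate of
 * the number of freeable objects, and ->scan_objects() tries to reclaim
 * up to sc->nr_to_scan of them, returning how many were actually freed.
 */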

static unsigned long erofs_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&erofs_global_shrink_cnt);
}

static unsigned long erofs_shrink_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct erofs_sb_info *sbi;
	struct list_head *p;

	unsigned long nr = sc->nr_to_scan;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&erofs_sb_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);

	/* Iterate over all mounted superblocks and try to shrink them */
	p = erofs_sb_list.next;
	while (p != &erofs_sb_list) {
		sbi = list_entry(p, struct erofs_sb_info, list);

		/*
		 * Superblocks that have been processed are moved to the
		 * end of the list, so stop once we see one that has
		 * already been done in this run.
		 */
		if (sbi->shrinker_run_no == run_no)
			break;

		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}

		spin_unlock(&erofs_sb_list_lock);
		sbi->shrinker_run_no = run_no;

		freed += erofs_shrink_workstation(sbi, nr - freed);

		spin_lock(&erofs_sb_list_lock);
		/* Get the next list element before we move this one */
		p = p->next;

		/*
		 * Move this one to the end of the list to provide some
		 * fairness.
		 */
		list_move_tail(&sbi->list, &erofs_sb_list);
		mutex_unlock(&sbi->umount_mutex);

		if (freed >= nr)
			break;
	}
	spin_unlock(&erofs_sb_list_lock);
	return freed;
}

static struct shrinker *erofs_shrinker_info;
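
/*
 * Set up the single global shrinker instance. These init/exit helpers
 * are presumably invoked from the EROFS module init/exit paths (outside
 * this file).
 */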

int __init erofs_init_shrinker(void)
{
	erofs_shrinker_info = shrinker_alloc(0, "erofs-shrinker");
	if (!erofs_shrinker_info)
		return -ENOMEM;

	erofs_shrinker_info->count_objects = erofs_shrink_count;
	erofs_shrinker_info->scan_objects = erofs_shrink_scan;

	shrinker_register(erofs_shrinker_info);

	return 0;
}

void erofs_exit_shrinker(void)
{
	shrinker_free(erofs_shrinker_info);
}
#endif /* !CONFIG_EROFS_FS_ZIP */