erofs: shouldn't churn the mapping page for duplicated copies
fs/erofs/zdata.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2018 HUAWEI, Inc.
4  *             https://www.huawei.com/
5  * Copyright (C) 2022 Alibaba Cloud
6  */
7 #include "zdata.h"
8 #include "compress.h"
9 #include <linux/prefetch.h>
10 #include <linux/psi.h>
11
12 #include <trace/events/erofs.h>
13
14 /*
15  * since pclustersize is variable for the big pcluster feature, introduce
16  * slab pools for different pcluster sizes.
17  */
18 struct z_erofs_pcluster_slab {
19         struct kmem_cache *slab;
20         unsigned int maxpages;
21         char name[48];
22 };
23
24 #define _PCLP(n) { .maxpages = n }
25
26 static struct z_erofs_pcluster_slab pcluster_pool[] __read_mostly = {
27         _PCLP(1), _PCLP(4), _PCLP(16), _PCLP(64), _PCLP(128),
28         _PCLP(Z_EROFS_PCLUSTER_MAX_PAGES)
29 };
30
31 struct z_erofs_bvec_iter {
32         struct page *bvpage;
33         struct z_erofs_bvset *bvset;
34         unsigned int nr, cur;
35 };
36
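/*
 * Note: a z_erofs_bvec_iter walks a chain of bvec arrays ("bvsets"): the
 * first (inline) bvset is embedded in the pcluster itself, and each
 * following bvset occupies a whole short-lived page linked via ->nextpage.
 * The helpers below kmap/kunmap those pages on demand.
 */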
37 static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
38 {
39         if (iter->bvpage)
40                 kunmap_local(iter->bvset);
41         return iter->bvpage;
42 }
43
44 static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
45 {
46         unsigned long base = offsetof(struct z_erofs_bvset, bvec);
47         /* have to access nextpage in advance, otherwise it will be unmapped */
48         struct page *nextpage = iter->bvset->nextpage;
49         struct page *oldpage;
50
51         DBG_BUGON(!nextpage);
52         oldpage = z_erofs_bvec_iter_end(iter);
53         iter->bvpage = nextpage;
54         iter->bvset = kmap_local_page(nextpage);
55         iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
56         iter->cur = 0;
57         return oldpage;
58 }
59
60 static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
61                                     struct z_erofs_bvset_inline *bvset,
62                                     unsigned int bootstrap_nr,
63                                     unsigned int cur)
64 {
65         *iter = (struct z_erofs_bvec_iter) {
66                 .nr = bootstrap_nr,
67                 .bvset = (struct z_erofs_bvset *)bvset,
68         };
69
70         while (cur > iter->nr) {
71                 cur -= iter->nr;
72                 z_erofs_bvset_flip(iter);
73         }
74         iter->cur = cur;
75 }
76
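/*
 * Note: enqueuing returns -EAGAIN when the current bvset is full and no
 * spare (candidate) page is available; the caller is then expected to
 * supply one short-lived page and retry (see z_erofs_attach_page below).
 */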
77 static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
78                                 struct z_erofs_bvec *bvec,
79                                 struct page **candidate_bvpage)
80 {
81         if (iter->cur == iter->nr) {
82                 if (!*candidate_bvpage)
83                         return -EAGAIN;
84
85                 DBG_BUGON(iter->bvset->nextpage);
86                 iter->bvset->nextpage = *candidate_bvpage;
87                 z_erofs_bvset_flip(iter);
88
89                 iter->bvset->nextpage = NULL;
90                 *candidate_bvpage = NULL;
91         }
92         iter->bvset->bvec[iter->cur++] = *bvec;
93         return 0;
94 }
95
96 static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
97                                  struct z_erofs_bvec *bvec,
98                                  struct page **old_bvpage)
99 {
100         if (iter->cur == iter->nr)
101                 *old_bvpage = z_erofs_bvset_flip(iter);
102         else
103                 *old_bvpage = NULL;
104         *bvec = iter->bvset->bvec[iter->cur++];
105 }
106
107 static void z_erofs_destroy_pcluster_pool(void)
108 {
109         int i;
110
111         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
112                 if (!pcluster_pool[i].slab)
113                         continue;
114                 kmem_cache_destroy(pcluster_pool[i].slab);
115                 pcluster_pool[i].slab = NULL;
116         }
117 }
118
119 static int z_erofs_create_pcluster_pool(void)
120 {
121         struct z_erofs_pcluster_slab *pcs;
122         struct z_erofs_pcluster *a;
123         unsigned int size;
124
125         for (pcs = pcluster_pool;
126              pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) {
127                 size = struct_size(a, compressed_bvecs, pcs->maxpages);
128
129                 sprintf(pcs->name, "erofs_pcluster-%u", pcs->maxpages);
130                 pcs->slab = kmem_cache_create(pcs->name, size, 0,
131                                               SLAB_RECLAIM_ACCOUNT, NULL);
132                 if (pcs->slab)
133                         continue;
134
135                 z_erofs_destroy_pcluster_pool();
136                 return -ENOMEM;
137         }
138         return 0;
139 }
140
141 static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages)
142 {
143         int i;
144
145         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
146                 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
147                 struct z_erofs_pcluster *pcl;
148
149                 if (nrpages > pcs->maxpages)
150                         continue;
151
152                 pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
153                 if (!pcl)
154                         return ERR_PTR(-ENOMEM);
155                 pcl->pclusterpages = nrpages;
156                 return pcl;
157         }
158         return ERR_PTR(-EINVAL);
159 }
160
161 static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
162 {
163         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
164         int i;
165
166         for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) {
167                 struct z_erofs_pcluster_slab *pcs = pcluster_pool + i;
168
169                 if (pclusterpages > pcs->maxpages)
170                         continue;
171
172                 kmem_cache_free(pcs->slab, pcl);
173                 return;
174         }
175         DBG_BUGON(1);
176 }
177
178 /* how to allocate cached pages for a pcluster */
179 enum z_erofs_cache_alloctype {
180         DONTALLOC,      /* don't allocate any cached pages */
181         /*
182          * try to use cached I/O if page allocation succeeds, or fall back
183          * to in-place I/O instead to avoid any direct reclaim.
184          */
185         TRYALLOC,
186 };
187
188 /*
189  * tagged pointer with 1-bit tag for all compressed pages
190  * tag 1 - the page is just found with an extra page reference
191  */
192 typedef tagptr1_t compressed_page_t;
193
194 #define tag_compressed_page_justfound(page) \
195         tagptr_fold(compressed_page_t, page, 1)
196
197 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
198
199 void z_erofs_exit_zip_subsystem(void)
200 {
201         destroy_workqueue(z_erofs_workqueue);
202         z_erofs_destroy_pcluster_pool();
203 }
204
205 static inline int z_erofs_init_workqueue(void)
206 {
207         const unsigned int onlinecpus = num_possible_cpus();
208
209         /*
210          * no need to spawn too many threads; limiting the thread count can
211          * minimize scheduling overhead, perhaps per-CPU threads would be better?
212          */
213         z_erofs_workqueue = alloc_workqueue("erofs_unzipd",
214                                             WQ_UNBOUND | WQ_HIGHPRI,
215                                             onlinecpus + onlinecpus / 4);
216         return z_erofs_workqueue ? 0 : -ENOMEM;
217 }
218
219 int __init z_erofs_init_zip_subsystem(void)
220 {
221         int err = z_erofs_create_pcluster_pool();
222
223         if (err)
224                 return err;
225         err = z_erofs_init_workqueue();
226         if (err)
227                 z_erofs_destroy_pcluster_pool();
228         return err;
229 }
230
231 enum z_erofs_pclustermode {
232         Z_EROFS_PCLUSTER_INFLIGHT,
233         /*
234          * The current pcluster was the tail of an existing chain, and all
235          * previously processed chained pclusters have been decided to hook
236          * up to it.
237          * A new chain will be created for the remaining pclusters which are
238          * not processed yet, so different from Z_EROFS_PCLUSTER_FOLLOWED,
239          * the next pcluster cannot reuse the whole page safely for inplace I/O
240          * in the following scenario:
241          *  ________________________________________________________________
242          * |      tail (partial) page     |       head (partial) page       |
243          * |   (belongs to the next pcl)  |   (belongs to the current pcl)  |
244          * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________|
245          */
246         Z_EROFS_PCLUSTER_HOOKED,
247         /*
248          * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
249          * could be dispatched into the bypass queue later due to up-to-date
250          * managed pages. All related online pages cannot be reused for inplace
251          * I/O (or bvpage) since it can be directly decoded without I/O submission.
252          */
253         Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
254         /*
255          * The current pcluster has been linked into the owned chain, and
256          * could also be linked with the remaining pclusters, which means
257          * that if the processing page is the tail page of a pcluster,
258          * the current pcluster can safely use the whole page (since
259          * the previous pcluster is under control) for in-place I/O, as
260          * illustrated below:
261          *  ________________________________________________________________
262          * |  tail (partial) page |          head (partial) page           |
263          * |  (of the current cl) |      (of the previous collection)      |
264          * | PCLUSTER_FOLLOWED or |                                        |
265          * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________|
266          *
267          * [  (*) the above page can be used as inplace I/O.               ]
268          */
269         Z_EROFS_PCLUSTER_FOLLOWED,
270 };
271
272 struct z_erofs_decompress_frontend {
273         struct inode *const inode;
274         struct erofs_map_blocks map;
275         struct z_erofs_bvec_iter biter;
276
277         struct page *candidate_bvpage;
278         struct z_erofs_pcluster *pcl, *tailpcl;
279         z_erofs_next_pcluster_t owned_head;
280         enum z_erofs_pclustermode mode;
281
282         bool readahead;
283         /* used for applying cache strategy on the fly */
284         bool backmost;
285         erofs_off_t headoffset;
286
287         /* a pointer used to pick up inplace I/O pages */
288         unsigned int icur;
289 };
290
291 #define DECOMPRESS_FRONTEND_INIT(__i) { \
292         .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
293         .mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
294
295 static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
296                                enum z_erofs_cache_alloctype type,
297                                struct page **pagepool)
298 {
299         struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
300         struct z_erofs_pcluster *pcl = fe->pcl;
301         bool standalone = true;
302         /*
303          * optimistic allocation without direct reclaim, since in-place I/O
304          * can be used as a fallback under low memory.
305          */
306         gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) |
307                         __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
308         unsigned int i;
309
310         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
311                 return;
312
313         for (i = 0; i < pcl->pclusterpages; ++i) {
314                 struct page *page;
315                 compressed_page_t t;
316                 struct page *newpage = NULL;
317
318                 /* the compressed page was loaded before */
319                 if (READ_ONCE(pcl->compressed_bvecs[i].page))
320                         continue;
321
322                 page = find_get_page(mc, pcl->obj.index + i);
323
324                 if (page) {
325                         t = tag_compressed_page_justfound(page);
326                 } else {
327                         /* I/O is needed, not possible to decompress directly */
328                         standalone = false;
329                         switch (type) {
330                         case TRYALLOC:
331                                 newpage = erofs_allocpage(pagepool, gfp);
332                                 if (!newpage)
333                                         continue;
334                                 set_page_private(newpage,
335                                                  Z_EROFS_PREALLOCATED_PAGE);
336                                 t = tag_compressed_page_justfound(newpage);
337                                 break;
338                         default:        /* DONTALLOC */
339                                 continue;
340                         }
341                 }
342
343                 if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL,
344                                      tagptr_cast_ptr(t)))
345                         continue;
346
347                 if (page)
348                         put_page(page);
349                 else if (newpage)
350                         erofs_pagepool_add(pagepool, newpage);
351         }
352
353         /*
354          * don't do inplace I/O if all compressed pages are available in
355          * managed cache since it can be moved to the bypass queue instead.
356          */
357         if (standalone)
358                 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
359 }
360
361 /* called by erofs_shrinker to get rid of all compressed_pages */
362 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
363                                        struct erofs_workgroup *grp)
364 {
365         struct z_erofs_pcluster *const pcl =
366                 container_of(grp, struct z_erofs_pcluster, obj);
367         int i;
368
369         DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
370         /*
371          * the workgroup refcount is now frozen at 1,
372          * therefore no need to worry about concurrent decompression users.
373          */
374         for (i = 0; i < pcl->pclusterpages; ++i) {
375                 struct page *page = pcl->compressed_bvecs[i].page;
376
377                 if (!page)
378                         continue;
379
380                 /* block other users from reclaiming or migrating the page */
381                 if (!trylock_page(page))
382                         return -EBUSY;
383
384                 if (!erofs_page_is_managed(sbi, page))
385                         continue;
386
387                 /* barrier is implied in the following 'unlock_page' */
388                 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
389                 detach_page_private(page);
390                 unlock_page(page);
391         }
392         return 0;
393 }
394
395 int erofs_try_to_free_cached_page(struct page *page)
396 {
397         struct z_erofs_pcluster *const pcl = (void *)page_private(page);
398         int ret, i;
399
400         if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
401                 return 0;
402
403         ret = 0;
404         DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
405         for (i = 0; i < pcl->pclusterpages; ++i) {
406                 if (pcl->compressed_bvecs[i].page == page) {
407                         WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
408                         ret = 1;
409                         break;
410                 }
411         }
412         erofs_workgroup_unfreeze(&pcl->obj, 1);
413         if (ret)
414                 detach_page_private(page);
415         return ret;
416 }
417
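/*
 * Note: in-place I/O reuses not-yet-filled slots of compressed_bvecs[] to
 * hold file pages directly, scanning backwards from fe->icur since
 * file-backed online pages are traversed in reverse order.
 */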
418 static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
419                                    struct z_erofs_bvec *bvec)
420 {
421         struct z_erofs_pcluster *const pcl = fe->pcl;
422
423         while (fe->icur > 0) {
424                 if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
425                              NULL, bvec->page)) {
426                         pcl->compressed_bvecs[fe->icur] = *bvec;
427                         return true;
428                 }
429         }
430         return false;
431 }
432
433 /* callers must hold the pcluster lock */
434 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
435                                struct z_erofs_bvec *bvec, bool exclusive)
436 {
437         int ret;
438
439         if (exclusive) {
440                 /* give priority to in-place I/O to use file pages first */
441                 if (z_erofs_try_inplace_io(fe, bvec))
442                         return 0;
443                 /* otherwise, check if it can be used as a bvpage */
444                 if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
445                     !fe->candidate_bvpage)
446                         fe->candidate_bvpage = bvec->page;
447         }
448         ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage);
449         fe->pcl->vcnt += (ret >= 0);
450         return ret;
451 }
452
453 static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
454 {
455         struct z_erofs_pcluster *pcl = f->pcl;
456         z_erofs_next_pcluster_t *owned_head = &f->owned_head;
457
458         /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */
459         if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
460                     *owned_head) == Z_EROFS_PCLUSTER_NIL) {
461                 *owned_head = &pcl->next;
462                 /* so we can attach this pcluster to our submission chain. */
463                 f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
464                 return;
465         }
466
467         /*
468          * type 2, link to the end of an existing open chain, be careful
469          * that its submission is controlled by the original attached chain.
470          */
471         if (*owned_head != &pcl->next && pcl != f->tailpcl &&
472             cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
473                     *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
474                 *owned_head = Z_EROFS_PCLUSTER_TAIL;
475                 f->mode = Z_EROFS_PCLUSTER_HOOKED;
476                 f->tailpcl = NULL;
477                 return;
478         }
479         /* type 3, it belongs to a chain, but it isn't the end of the chain */
480         f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
481 }
482
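/*
 * Note: this allocates and initializes a new pcluster; non-inline
 * pclusters are also inserted into the per-sb workgroup tree, and losing
 * the insertion race (-EEXIST) hands back the existing pcluster while the
 * freshly allocated one is freed.
 */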
483 static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
484 {
485         struct erofs_map_blocks *map = &fe->map;
486         bool ztailpacking = map->m_flags & EROFS_MAP_META;
487         struct z_erofs_pcluster *pcl;
488         struct erofs_workgroup *grp;
489         int err;
490
491         if (!(map->m_flags & EROFS_MAP_ENCODED)) {
492                 DBG_BUGON(1);
493                 return -EFSCORRUPTED;
494         }
495
496         /* no available pcluster, let's allocate one */
497         pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 :
498                                      map->m_plen >> PAGE_SHIFT);
499         if (IS_ERR(pcl))
500                 return PTR_ERR(pcl);
501
502         atomic_set(&pcl->obj.refcount, 1);
503         pcl->algorithmformat = map->m_algorithmformat;
504         pcl->length = 0;
505         pcl->partial = true;
506
507         /* new pclusters should be claimed as type 1, primary and followed */
508         pcl->next = fe->owned_head;
509         pcl->pageofs_out = map->m_la & ~PAGE_MASK;
510         fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
511
512         /*
513          * lock all primary followed pclusters before making them visible to
514          * others; mutex_trylock *never* fails for a new pcluster.
515          */
516         mutex_init(&pcl->lock);
517         DBG_BUGON(!mutex_trylock(&pcl->lock));
518
519         if (ztailpacking) {
520                 pcl->obj.index = 0;     /* which indicates ztailpacking */
521                 pcl->pageofs_in = erofs_blkoff(map->m_pa);
522                 pcl->tailpacking_size = map->m_plen;
523         } else {
524                 pcl->obj.index = map->m_pa >> PAGE_SHIFT;
525
526                 grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
527                 if (IS_ERR(grp)) {
528                         err = PTR_ERR(grp);
529                         goto err_out;
530                 }
531
532                 if (grp != &pcl->obj) {
533                         fe->pcl = container_of(grp,
534                                         struct z_erofs_pcluster, obj);
535                         err = -EEXIST;
536                         goto err_out;
537                 }
538         }
539         /* used to check tail merging loop due to corrupted images */
540         if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
541                 fe->tailpcl = pcl;
542         fe->owned_head = &pcl->next;
543         fe->pcl = pcl;
544         return 0;
545
546 err_out:
547         mutex_unlock(&pcl->lock);
548         z_erofs_free_pcluster(pcl);
549         return err;
550 }
551
552 static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
553 {
554         struct erofs_map_blocks *map = &fe->map;
555         struct erofs_workgroup *grp = NULL;
556         int ret;
557
558         DBG_BUGON(fe->pcl);
559
560         /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
561         DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
562         DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
563
564         if (!(map->m_flags & EROFS_MAP_META)) {
565                 grp = erofs_find_workgroup(fe->inode->i_sb,
566                                            map->m_pa >> PAGE_SHIFT);
567         } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
568                 DBG_BUGON(1);
569                 return -EFSCORRUPTED;
570         }
571
572         if (grp) {
573                 fe->pcl = container_of(grp, struct z_erofs_pcluster, obj);
574                 ret = -EEXIST;
575         } else {
576                 ret = z_erofs_register_pcluster(fe);
577         }
578
579         if (ret == -EEXIST) {
580                 mutex_lock(&fe->pcl->lock);
581                 /* used to check tail merging loop due to corrupted images */
582                 if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
583                         fe->tailpcl = fe->pcl;
584
585                 z_erofs_try_to_claim_pcluster(fe);
586         } else if (ret) {
587                 return ret;
588         }
589         z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
590                                 Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
591         /* since file-backed online pages are traversed in reverse order */
592         fe->icur = z_erofs_pclusterpages(fe->pcl);
593         return 0;
594 }
595
596 /*
597  * keep in mind that referenced pclusters will be freed
598  * only after an RCU grace period.
599  */
600 static void z_erofs_rcu_callback(struct rcu_head *head)
601 {
602         z_erofs_free_pcluster(container_of(head,
603                         struct z_erofs_pcluster, rcu));
604 }
605
606 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
607 {
608         struct z_erofs_pcluster *const pcl =
609                 container_of(grp, struct z_erofs_pcluster, obj);
610
611         call_rcu(&pcl->rcu, z_erofs_rcu_callback);
612 }
613
614 static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
615 {
616         struct z_erofs_pcluster *pcl = fe->pcl;
617
618         if (!pcl)
619                 return false;
620
621         z_erofs_bvec_iter_end(&fe->biter);
622         mutex_unlock(&pcl->lock);
623
624         if (fe->candidate_bvpage) {
625                 DBG_BUGON(z_erofs_is_shortlived_page(fe->candidate_bvpage));
626                 fe->candidate_bvpage = NULL;
627         }
628
629         /*
630          * once all pending pages have been added, drop the pcluster
631          * reference if the pcluster isn't hosted by ourselves.
632          */
633         if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
634                 erofs_workgroup_put(&pcl->obj);
635
636         fe->pcl = NULL;
637         return true;
638 }
639
640 static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
641                                        unsigned int cachestrategy,
642                                        erofs_off_t la)
643 {
644         if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
645                 return false;
646
647         if (fe->backmost)
648                 return true;
649
650         return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
651                 la < fe->headoffset;
652 }
653
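/*
 * Note: this copies `len' bytes of special packed inode (fragment) data at
 * `pos' into `page', one filesystem block at a time since the copied range
 * may cross block boundaries.
 */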
654 static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
655                                  struct page *page, unsigned int pageofs,
656                                  unsigned int len)
657 {
658         struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode;
659         struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
660         u8 *src, *dst;
661         unsigned int i, cnt;
662
663         pos += EROFS_I(inode)->z_fragmentoff;
664         for (i = 0; i < len; i += cnt) {
665                 cnt = min_t(unsigned int, len - i,
666                             EROFS_BLKSIZ - erofs_blkoff(pos));
667                 src = erofs_bread(&buf, packed_inode,
668                                   erofs_blknr(pos), EROFS_KMAP);
669                 if (IS_ERR(src)) {
670                         erofs_put_metabuf(&buf);
671                         return PTR_ERR(src);
672                 }
673
674                 dst = kmap_local_page(page);
675                 memcpy(dst + pageofs + i, src + erofs_blkoff(pos), cnt);
676                 kunmap_local(dst);
677                 pos += cnt;
678         }
679         erofs_put_metabuf(&buf);
680         return 0;
681 }
682
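/*
 * Note: this is the main per-page read loop.  A single file page may span
 * several pclusters, so it is processed backwards from the page end (see
 * the `repeat' label), splitting the page into parts and attaching each
 * part to its matching pcluster.
 */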
683 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
684                                 struct page *page, struct page **pagepool)
685 {
686         struct inode *const inode = fe->inode;
687         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
688         struct erofs_map_blocks *const map = &fe->map;
689         const loff_t offset = page_offset(page);
690         bool tight = true, exclusive;
691
692         enum z_erofs_cache_alloctype cache_strategy;
693         unsigned int cur, end, split;
694         int err = 0;
695
696         /* register the locked file page as an online page */
697         z_erofs_onlinepage_init(page);
698
699         split = 0;
700         end = PAGE_SIZE;
701 repeat:
702         cur = end - 1;
703
704         if (offset + cur < map->m_la ||
705             offset + cur >= map->m_la + map->m_llen) {
706                 erofs_dbg("out-of-range map @ pos %llu", offset + cur);
707
708                 if (z_erofs_collector_end(fe))
709                         fe->backmost = false;
710                 map->m_la = offset + cur;
711                 map->m_llen = 0;
712                 err = z_erofs_map_blocks_iter(inode, map, 0);
713                 if (err)
714                         goto out;
715         } else {
716                 if (fe->pcl)
717                         goto hitted;
718                 /* didn't get a valid pcluster previously (very rare) */
719         }
720
721         if (!(map->m_flags & EROFS_MAP_MAPPED) ||
722             map->m_flags & EROFS_MAP_FRAGMENT)
723                 goto hitted;
724
725         err = z_erofs_collector_begin(fe);
726         if (err)
727                 goto out;
728
729         if (z_erofs_is_inline_pcluster(fe->pcl)) {
730                 void *mp;
731
732                 mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
733                                         erofs_blknr(map->m_pa), EROFS_NO_KMAP);
734                 if (IS_ERR(mp)) {
735                         err = PTR_ERR(mp);
736                         erofs_err(inode->i_sb,
737                                   "failed to get inline page, err %d", err);
738                         goto out;
739                 }
740                 get_page(fe->map.buf.page);
741                 WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
742                            fe->map.buf.page);
743                 fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
744         } else {
745                 /* bind cache first when cached decompression is preferred */
746                 if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
747                                                map->m_la))
748                         cache_strategy = TRYALLOC;
749                 else
750                         cache_strategy = DONTALLOC;
751
752                 z_erofs_bind_cache(fe, cache_strategy, pagepool);
753         }
754 hitted:
755         /*
756          * Ensure the current partial page belongs to this submit chain rather
757          * than other concurrent submit chains or the noio (bypass) chain, since
758          * those chains are handled asynchronously and thus the page cannot be
759          * used for in-place I/O or bvpage (which must be processed in strict order).
760          */
761         tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED &&
762                   fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
763
764         cur = end - min_t(unsigned int, offset + end - map->m_la, end);
765         if (!(map->m_flags & EROFS_MAP_MAPPED)) {
766                 zero_user_segment(page, cur, end);
767                 goto next_part;
768         }
769         if (map->m_flags & EROFS_MAP_FRAGMENT) {
770                 unsigned int pageofs, skip, len;
771
772                 if (offset > map->m_la) {
773                         pageofs = 0;
774                         skip = offset - map->m_la;
775                 } else {
776                         pageofs = map->m_la & ~PAGE_MASK;
777                         skip = 0;
778                 }
779                 len = min_t(unsigned int, map->m_llen - skip, end - cur);
780                 err = z_erofs_read_fragment(inode, skip, page, pageofs, len);
781                 if (err)
782                         goto out;
783                 ++split;
784                 tight = false;
785                 goto next_part;
786         }
787
788         exclusive = (!cur && (!split || tight));
789         if (cur)
790                 tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
791
792 retry:
793         err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
794                                         .page = page,
795                                         .offset = offset - map->m_la,
796                                         .end = end,
797                                   }), exclusive);
798         /* should allocate an additional short-lived page for bvset */
799         if (err == -EAGAIN && !fe->candidate_bvpage) {
800                 fe->candidate_bvpage = alloc_page(GFP_NOFS | __GFP_NOFAIL);
801                 set_page_private(fe->candidate_bvpage,
802                                  Z_EROFS_SHORTLIVED_PAGE);
803                 goto retry;
804         }
805
806         if (err) {
807                 DBG_BUGON(err == -EAGAIN && fe->candidate_bvpage);
808                 goto out;
809         }
810
811         z_erofs_onlinepage_split(page);
812         /* bump up the number of split parts of a page */
813         ++split;
814         if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
815                 fe->pcl->multibases = true;
816
817         if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
818             !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
819             fe->pcl->length == map->m_llen)
820                 fe->pcl->partial = false;
821         if (fe->pcl->length < offset + end - map->m_la) {
822                 fe->pcl->length = offset + end - map->m_la;
823                 fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
824         }
825 next_part:
826         /* shorten the remaining extent to update progress */
827         map->m_llen = offset + cur - map->m_la;
828         map->m_flags &= ~EROFS_MAP_FULL_MAPPED;
829
830         end = cur;
831         if (end > 0)
832                 goto repeat;
833
834 out:
835         if (err)
836                 z_erofs_page_mark_eio(page);
837         z_erofs_onlinepage_endio(page);
838
839         erofs_dbg("%s, finish page: %pK split: %u map->m_llen %llu",
840                   __func__, page, split, map->m_llen);
841         return err;
842 }
843
844 static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
845                                        unsigned int readahead_pages)
846 {
847         /* auto: enable for read_folio, disable for readahead */
848         if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) &&
849             !readahead_pages)
850                 return true;
851
852         if ((sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_FORCE_ON) &&
853             (readahead_pages <= sbi->opt.max_sync_decompress_pages))
854                 return true;
855
856         return false;
857 }
858
859 static bool z_erofs_page_is_invalidated(struct page *page)
860 {
861         return !page->mapping && !z_erofs_is_shortlived_page(page);
862 }
863
864 struct z_erofs_decompress_backend {
865         struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
866         struct super_block *sb;
867         struct z_erofs_pcluster *pcl;
868
869         /* pages with the longest decompressed length for deduplication */
870         struct page **decompressed_pages;
871         /* pages to keep the compressed data */
872         struct page **compressed_pages;
873
874         struct list_head decompressed_secondary_bvecs;
875         struct page **pagepool;
876         unsigned int onstack_used, nr_pages;
877 };
878
879 struct z_erofs_bvec_item {
880         struct z_erofs_bvec bvec;
881         struct list_head list;
882 };
883
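/*
 * Note: a page-aligned bvec can directly serve as the primary copy in
 * decompressed_pages[]; duplicated requests for the same pcluster take the
 * cold path below and are recorded in decompressed_secondary_bvecs, to be
 * filled from the primary copy by z_erofs_fill_other_copies() rather than
 * churning the mapping page again.
 */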
884 static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
885                                          struct z_erofs_bvec *bvec)
886 {
887         struct z_erofs_bvec_item *item;
888
889         if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) {
890                 unsigned int pgnr;
891
892                 pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
893                 DBG_BUGON(pgnr >= be->nr_pages);
894                 if (!be->decompressed_pages[pgnr]) {
895                         be->decompressed_pages[pgnr] = bvec->page;
896                         return;
897                 }
898         }
899
900         /* (cold path) one pcluster is requested multiple times */
901         item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_NOFAIL);
902         item->bvec = *bvec;
903         list_add(&item->list, &be->decompressed_secondary_bvecs);
904 }
905
906 static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
907                                       int err)
908 {
909         unsigned int off0 = be->pcl->pageofs_out;
910         struct list_head *p, *n;
911
912         list_for_each_safe(p, n, &be->decompressed_secondary_bvecs) {
913                 struct z_erofs_bvec_item *bvi;
914                 unsigned int end, cur;
915                 void *dst, *src;
916
917                 bvi = container_of(p, struct z_erofs_bvec_item, list);
918                 cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
919                 end = min_t(unsigned int, be->pcl->length - bvi->bvec.offset,
920                             bvi->bvec.end);
921                 dst = kmap_local_page(bvi->bvec.page);
922                 while (cur < end) {
923                         unsigned int pgnr, scur, len;
924
925                         pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
926                         DBG_BUGON(pgnr >= be->nr_pages);
927
928                         scur = bvi->bvec.offset + cur -
929                                         ((pgnr << PAGE_SHIFT) - off0);
930                         len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
931                         if (!be->decompressed_pages[pgnr]) {
932                                 err = -EFSCORRUPTED;
933                                 cur += len;
934                                 continue;
935                         }
936                         src = kmap_local_page(be->decompressed_pages[pgnr]);
937                         memcpy(dst + cur, src + scur, len);
938                         kunmap_local(src);
939                         cur += len;
940                 }
941                 kunmap_local(dst);
942                 if (err)
943                         z_erofs_page_mark_eio(bvi->bvec.page);
944                 z_erofs_onlinepage_endio(bvi->bvec.page);
945                 list_del(p);
946                 kfree(bvi);
947         }
948 }
949
950 static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
951 {
952         struct z_erofs_pcluster *pcl = be->pcl;
953         struct z_erofs_bvec_iter biter;
954         struct page *old_bvpage;
955         int i;
956
957         z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
958         for (i = 0; i < pcl->vcnt; ++i) {
959                 struct z_erofs_bvec bvec;
960
961                 z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
962
963                 if (old_bvpage)
964                         z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
965
966                 DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
967                 z_erofs_do_decompressed_bvec(be, &bvec);
968         }
969
970         old_bvpage = z_erofs_bvec_iter_end(&biter);
971         if (old_bvpage)
972                 z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
973 }
974
975 static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
976                                   bool *overlapped)
977 {
978         struct z_erofs_pcluster *pcl = be->pcl;
979         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
980         int i, err = 0;
981
982         *overlapped = false;
983         for (i = 0; i < pclusterpages; ++i) {
984                 struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i];
985                 struct page *page = bvec->page;
986
987                 /* compressed pages ought to be present before decompressing */
988                 if (!page) {
989                         DBG_BUGON(1);
990                         continue;
991                 }
992                 be->compressed_pages[i] = page;
993
994                 if (z_erofs_is_inline_pcluster(pcl)) {
995                         if (!PageUptodate(page))
996                                 err = -EIO;
997                         continue;
998                 }
999
1000                 DBG_BUGON(z_erofs_page_is_invalidated(page));
1001                 if (!z_erofs_is_shortlived_page(page)) {
1002                         if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
1003                                 if (!PageUptodate(page))
1004                                         err = -EIO;
1005                                 continue;
1006                         }
1007                         z_erofs_do_decompressed_bvec(be, bvec);
1008                         *overlapped = true;
1009                 }
1010         }
1011
1012         if (err)
1013                 return err;
1014         return 0;
1015 }
1016
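/*
 * Note: this gathers output pages via z_erofs_parse_out_bvecs() and input
 * pages via z_erofs_parse_in_bvecs(), runs the decompressor, and then
 * releases all compressed pages before ending the attached file pages.
 */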
1017 static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
1018                                        int err)
1019 {
1020         struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
1021         struct z_erofs_pcluster *pcl = be->pcl;
1022         unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
1023         unsigned int i, inputsize;
1024         int err2;
1025         struct page *page;
1026         bool overlapped;
1027
1028         mutex_lock(&pcl->lock);
1029         be->nr_pages = PAGE_ALIGN(pcl->length + pcl->pageofs_out) >> PAGE_SHIFT;
1030
1031         /* allocate (de)compressed page arrays if cannot be kept on stack */
1032         be->decompressed_pages = NULL;
1033         be->compressed_pages = NULL;
1034         be->onstack_used = 0;
1035         if (be->nr_pages <= Z_EROFS_ONSTACK_PAGES) {
1036                 be->decompressed_pages = be->onstack_pages;
1037                 be->onstack_used = be->nr_pages;
1038                 memset(be->decompressed_pages, 0,
1039                        sizeof(struct page *) * be->nr_pages);
1040         }
1041
1042         if (pclusterpages + be->onstack_used <= Z_EROFS_ONSTACK_PAGES)
1043                 be->compressed_pages = be->onstack_pages + be->onstack_used;
1044
1045         if (!be->decompressed_pages)
1046                 be->decompressed_pages =
1047                         kvcalloc(be->nr_pages, sizeof(struct page *),
1048                                  GFP_KERNEL | __GFP_NOFAIL);
1049         if (!be->compressed_pages)
1050                 be->compressed_pages =
1051                         kvcalloc(pclusterpages, sizeof(struct page *),
1052                                  GFP_KERNEL | __GFP_NOFAIL);
1053
1054         z_erofs_parse_out_bvecs(be);
1055         err2 = z_erofs_parse_in_bvecs(be, &overlapped);
1056         if (err2)
1057                 err = err2;
1058         if (err)
1059                 goto out;
1060
1061         if (z_erofs_is_inline_pcluster(pcl))
1062                 inputsize = pcl->tailpacking_size;
1063         else
1064                 inputsize = pclusterpages * PAGE_SIZE;
1065
1066         err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
1067                                         .sb = be->sb,
1068                                         .in = be->compressed_pages,
1069                                         .out = be->decompressed_pages,
1070                                         .pageofs_in = pcl->pageofs_in,
1071                                         .pageofs_out = pcl->pageofs_out,
1072                                         .inputsize = inputsize,
1073                                         .outputsize = pcl->length,
1074                                         .alg = pcl->algorithmformat,
1075                                         .inplace_io = overlapped,
1076                                         .partial_decoding = pcl->partial,
1077                                         .fillgaps = pcl->multibases,
1078                                  }, be->pagepool);
1079
1080 out:
1081         /* must handle all compressed pages before actual file pages */
1082         if (z_erofs_is_inline_pcluster(pcl)) {
1083                 page = pcl->compressed_bvecs[0].page;
1084                 WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
1085                 put_page(page);
1086         } else {
1087                 for (i = 0; i < pclusterpages; ++i) {
1088                         page = pcl->compressed_bvecs[i].page;
1089
1090                         if (erofs_page_is_managed(sbi, page))
1091                                 continue;
1092
1093                         /* recycle all individual short-lived pages */
1094                         (void)z_erofs_put_shortlivedpage(be->pagepool, page);
1095                         WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
1096                 }
1097         }
1098         if (be->compressed_pages < be->onstack_pages ||
1099             be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
1100                 kvfree(be->compressed_pages);
1101         z_erofs_fill_other_copies(be, err);
1102
1103         for (i = 0; i < be->nr_pages; ++i) {
1104                 page = be->decompressed_pages[i];
1105                 if (!page)
1106                         continue;
1107
1108                 DBG_BUGON(z_erofs_page_is_invalidated(page));
1109
1110                 /* recycle all individual short-lived pages */
1111                 if (z_erofs_put_shortlivedpage(be->pagepool, page))
1112                         continue;
1113                 if (err)
1114                         z_erofs_page_mark_eio(page);
1115                 z_erofs_onlinepage_endio(page);
1116         }
1117
1118         if (be->decompressed_pages != be->onstack_pages)
1119                 kvfree(be->decompressed_pages);
1120
1121         pcl->length = 0;
1122         pcl->partial = true;
1123         pcl->multibases = false;
1124         pcl->bvset.nextpage = NULL;
1125         pcl->vcnt = 0;
1126
1127         /* pcluster lock MUST be taken before the following line */
1128         WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
1129         mutex_unlock(&pcl->lock);
1130         return err;
1131 }
1132
1133 static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
1134                                      struct page **pagepool)
1135 {
1136         struct z_erofs_decompress_backend be = {
1137                 .sb = io->sb,
1138                 .pagepool = pagepool,
1139                 .decompressed_secondary_bvecs =
1140                         LIST_HEAD_INIT(be.decompressed_secondary_bvecs),
1141         };
1142         z_erofs_next_pcluster_t owned = io->head;
1143
1144         while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
1145                 /* impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
1146                 DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
1147                 /* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */
1148                 DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
1149
1150                 be.pcl = container_of(owned, struct z_erofs_pcluster, next);
1151                 owned = READ_ONCE(be.pcl->next);
1152
1153                 z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
1154                 erofs_workgroup_put(&be.pcl->obj);
1155         }
1156 }
1157
1158 static void z_erofs_decompressqueue_work(struct work_struct *work)
1159 {
1160         struct z_erofs_decompressqueue *bgq =
1161                 container_of(work, struct z_erofs_decompressqueue, u.work);
1162         struct page *pagepool = NULL;
1163
1164         DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1165         z_erofs_decompress_queue(bgq, &pagepool);
1166
1167         erofs_release_pages(&pagepool);
1168         kvfree(bgq);
1169 }
1170
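/*
 * Note: `bios' can be negative (-1 from the bio completion path), so work
 * is only kicked off once pending_bios drops to zero, i.e. after all split
 * bios of this queue have completed.
 */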
1171 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
1172                                        bool sync, int bios)
1173 {
1174         struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
1175
1176         /* wake up the caller thread for sync decompression */
1177         if (sync) {
1178                 if (!atomic_add_return(bios, &io->pending_bios))
1179                         complete(&io->u.done);
1180                 return;
1181         }
1182
1183         if (atomic_add_return(bios, &io->pending_bios))
1184                 return;
1185         /* Use workqueue and sync decompression for atomic contexts only */
1186         if (in_atomic() || irqs_disabled()) {
1187                 queue_work(z_erofs_workqueue, &io->u.work);
1188                 /* enable sync decompression for readahead */
1189                 if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
1190                         sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
1191                 return;
1192         }
1193         z_erofs_decompressqueue_work(&io->u.work);
1194 }
1195
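/*
 * Note: this picks up the physical page to read the nr'th compressed page
 * of a pcluster from: a preallocated page, a still-valid managed-cache
 * page (NULL is returned if it's already up-to-date), or a newly allocated
 * page which is then inserted into the managed cache (or downgraded to a
 * short-lived page if the insertion fails).
 */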
1196 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
1197                                                unsigned int nr,
1198                                                struct page **pagepool,
1199                                                struct address_space *mc)
1200 {
1201         const pgoff_t index = pcl->obj.index;
1202         gfp_t gfp = mapping_gfp_mask(mc);
1203         bool tocache = false;
1204
1205         struct address_space *mapping;
1206         struct page *oldpage, *page;
1207
1208         compressed_page_t t;
1209         int justfound;
1210
1211 repeat:
1212         page = READ_ONCE(pcl->compressed_bvecs[nr].page);
1213         oldpage = page;
1214
1215         if (!page)
1216                 goto out_allocpage;
1217
1218         /* process the target tagged pointer */
1219         t = tagptr_init(compressed_page_t, page);
1220         justfound = tagptr_unfold_tags(t);
1221         page = tagptr_unfold_ptr(t);
1222
1223         /*
1224          * preallocated cached pages, which are used to avoid direct reclaim;
1225          * otherwise, the in-place I/O path will be taken instead.
1226          */
1227         if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
1228                 WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
1229                 set_page_private(page, 0);
1230                 tocache = true;
1231                 goto out_tocache;
1232         }
1233         mapping = READ_ONCE(page->mapping);
1234
1235         /*
1236          * file-backed online pages in the pcluster are all locked steadily,
1237          * therefore it is impossible for `mapping' to be NULL.
1238          */
1239         if (mapping && mapping != mc)
1240                 /* ought to be unmanaged pages */
1241                 goto out;
1242
1243         /* directly return for shortlived page as well */
1244         if (z_erofs_is_shortlived_page(page))
1245                 goto out;
1246
1247         lock_page(page);
1248
1249         /* only true if page reclaim goes wrong, should never happen */
1250         DBG_BUGON(justfound && PagePrivate(page));
1251
1252         /* the page is still in the managed cache */
1253         if (page->mapping == mc) {
1254                 WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
1255
1256                 if (!PagePrivate(page)) {
1257                         /*
1258                          * it's impossible to be !PagePrivate(page)
1259                          * under the current restriction if the page
1260                          * is already recorded in compressed_bvecs[].
1261                          */
1262                         DBG_BUGON(!justfound);
1263
1264                         justfound = 0;
1265                         set_page_private(page, (unsigned long)pcl);
1266                         SetPagePrivate(page);
1267                 }
1268
1269                 /* no need to submit io if it is already up-to-date */
1270                 if (PageUptodate(page)) {
1271                         unlock_page(page);
1272                         page = NULL;
1273                 }
1274                 goto out;
1275         }
1276
1277         /*
1278          * the managed page has been truncated; it's unsafe to
1279          * reuse it, so let's allocate a new cache-managed page.
1280          */
1281         DBG_BUGON(page->mapping);
1282         DBG_BUGON(!justfound);
1283
1284         tocache = true;
1285         unlock_page(page);
1286         put_page(page);
1287 out_allocpage:
1288         page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
1289         if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
1290                                oldpage, page)) {
1291                 erofs_pagepool_add(pagepool, page);
1292                 cond_resched();
1293                 goto repeat;
1294         }
1295 out_tocache:
1296         if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
1297                 /* turn into a short-lived page if page cache insertion fails (1 ref) */
1298                 set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
1299                 goto out;
1300         }
1301         attach_page_private(page, pcl);
1302         /* drop a refcount added by allocpage (then we have 2 refs here) */
1303         put_page(page);
1304
1305 out:    /* the only exit (for tracing and debugging) */
1306         return page;
1307 }
1308
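/*
 * Note: the on-stack queue (fgq) serves synchronous, foreground
 * decompression; a background queue is allocated for asynchronous
 * completion and falls back to foreground mode if the allocation fails.
 */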
1309 static struct z_erofs_decompressqueue *
1310 jobqueue_init(struct super_block *sb,
1311               struct z_erofs_decompressqueue *fgq, bool *fg)
1312 {
1313         struct z_erofs_decompressqueue *q;
1314
1315         if (fg && !*fg) {
1316                 q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
1317                 if (!q) {
1318                         *fg = true;
1319                         goto fg_out;
1320                 }
1321                 INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
1322         } else {
1323 fg_out:
1324                 q = fgq;
1325                 init_completion(&fgq->u.done);
1326                 atomic_set(&fgq->pending_bios, 0);
1327                 q->eio = false;
1328         }
1329         q->sb = sb;
1330         q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
1331         return q;
1332 }
1333
1334 /* define decompression jobqueue types */
1335 enum {
1336         JQ_BYPASS,
1337         JQ_SUBMIT,
1338         NR_JOBQUEUES,
1339 };
1340
1341 static void *jobqueueset_init(struct super_block *sb,
1342                               struct z_erofs_decompressqueue *q[],
1343                               struct z_erofs_decompressqueue *fgq, bool *fg)
1344 {
1345         /*
1346          * if managed cache is enabled, a bypass jobqueue is needed:
1347          * no device read is needed for pclusters in this queue.
1348          */
1349         q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
1350         q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
1351
1352         return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
1353 }
1354
1355 static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
1356                                     z_erofs_next_pcluster_t qtail[],
1357                                     z_erofs_next_pcluster_t owned_head)
1358 {
1359         z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
1360         z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
1361
1362         DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1363         if (owned_head == Z_EROFS_PCLUSTER_TAIL)
1364                 owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
1365
1366         WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
1367
1368         WRITE_ONCE(*submit_qtail, owned_head);
1369         WRITE_ONCE(*bypass_qtail, &pcl->next);
1370
1371         qtail[JQ_BYPASS] = &pcl->next;
1372 }
1373
1374 static void z_erofs_decompressqueue_endio(struct bio *bio)
1375 {
1376         tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
1377         struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
1378         blk_status_t err = bio->bi_status;
1379         struct bio_vec *bvec;
1380         struct bvec_iter_all iter_all;
1381
1382         bio_for_each_segment_all(bvec, bio, iter_all) {
1383                 struct page *page = bvec->bv_page;
1384
1385                 DBG_BUGON(PageUptodate(page));
1386                 DBG_BUGON(z_erofs_page_is_invalidated(page));
1387
1388                 if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
1389                         if (!err)
1390                                 SetPageUptodate(page);
1391                         unlock_page(page);
1392                 }
1393         }
1394         if (err)
1395                 q->eio = true;
1396         z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
1397         bio_put(bio);
1398 }
1399
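/*
 * Note: this walks the owned pcluster chain and closes it with
 * Z_EROFS_PCLUSTER_TAIL_CLOSED as it goes; inline pclusters are moved to
 * the bypass queue, while the remaining pclusters have their compressed
 * pages batched into as few bios as possible.
 */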
1400 static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
1401                                  struct page **pagepool,
1402                                  struct z_erofs_decompressqueue *fgq,
1403                                  bool *force_fg)
1404 {
1405         struct super_block *sb = f->inode->i_sb;
1406         struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
1407         z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
1408         struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
1409         void *bi_private;
1410         z_erofs_next_pcluster_t owned_head = f->owned_head;
1411         /* bio is NULL initially, so no need to initialize last_{index,bdev} */
1412         pgoff_t last_index;
1413         struct block_device *last_bdev;
1414         unsigned int nr_bios = 0;
1415         struct bio *bio = NULL;
1416         /* initialize to 1 so that psi_memstall_leave is skipped unless needed */
1417         unsigned long pflags = 1;
1418
1419         bi_private = jobqueueset_init(sb, q, fgq, force_fg);
1420         qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1421         qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1422
1423         /* by default, all pclusters need I/O submission */
1424         q[JQ_SUBMIT]->head = owned_head;
1425
1426         do {
1427                 struct erofs_map_dev mdev;
1428                 struct z_erofs_pcluster *pcl;
1429                 pgoff_t cur, end;
1430                 unsigned int i = 0;
1431                 bool bypass = true;
1432
1433                 /* 'owned_head' can never equal either of the following */
1434                 DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
1435                 DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
1436
1437                 pcl = container_of(owned_head, struct z_erofs_pcluster, next);
1438
1439                 /* close the main owned chain first so no new pcluster can be chained */
1440                 owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
1441                                      Z_EROFS_PCLUSTER_TAIL_CLOSED);
1442                 if (z_erofs_is_inline_pcluster(pcl)) {
1443                         move_to_bypass_jobqueue(pcl, qtail, owned_head);
1444                         continue;
1445                 }
1446
1447                 /* no device id here, thus erofs_map_dev() will always succeed */
1448                 mdev = (struct erofs_map_dev) {
1449                         .m_pa = blknr_to_addr(pcl->obj.index),
1450                 };
1451                 (void)erofs_map_dev(sb, &mdev);
1452
1453                 cur = erofs_blknr(mdev.m_pa);
1454                 end = cur + pcl->pclusterpages;
1455
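                /*
                 * submit each physical page of this pcluster;
                 * pickup_page_for_submission() returns NULL for pages that
                 * need no I/O (e.g. already uptodate in the managed cache).
                 */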
1456                 do {
1457                         struct page *page;
1458
1459                         page = pickup_page_for_submission(pcl, i++, pagepool,
1460                                                           mc);
1461                         if (!page)
1462                                 continue;
1463
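                        /*
                         * submit the current bio if this block is not
                         * contiguous with the previous one or belongs to
                         * a different device.
                         */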
1464                         if (bio && (cur != last_index + 1 ||
1465                                     last_bdev != mdev.m_bdev)) {
1466 submit_bio_retry:
1467                                 if (!pflags)
1468                                         psi_memstall_leave(&pflags);
1469                                 submit_bio(bio);
1470                                 bio = NULL;
1471                         }
1472
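                        /* a refaulting page indicates thrashing: account this read as a memory stall for PSI */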
1473                         if (unlikely(PageWorkingset(page)))
1474                                 psi_memstall_enter(&pflags);
1475
1476                         if (!bio) {
1477                                 bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
1478                                                 REQ_OP_READ, GFP_NOIO);
1479                                 bio->bi_end_io = z_erofs_decompressqueue_endio;
1480
1481                                 last_bdev = mdev.m_bdev;
1482                                 bio->bi_iter.bi_sector = (sector_t)cur <<
1483                                         LOG_SECTORS_PER_BLOCK;
1484                                 bio->bi_private = bi_private;
1485                                 if (f->readahead)
1486                                         bio->bi_opf |= REQ_RAHEAD;
1487                                 ++nr_bios;
1488                         }
1489
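                        /* if the page cannot be added (bio full), submit the bio and retry with a fresh one */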
1490                         if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
1491                                 goto submit_bio_retry;
1492
1493                         last_index = cur;
1494                         bypass = false;
1495                 } while (++cur < end);
1496
1497                 if (!bypass)
1498                         qtail[JQ_SUBMIT] = &pcl->next;
1499                 else
1500                         move_to_bypass_jobqueue(pcl, qtail, owned_head);
1501         } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
1502
1503         if (bio) {
1504                 if (!pflags)
1505                         psi_memstall_leave(&pflags);
1506                 submit_bio(bio);
1507         }
1508
1509         /*
1510          * although background decompression is preferred, nothing is pending
1511          * for submission; don't queue decompression work, just free the queue.
1512          */
1513         if (!*force_fg && !nr_bios) {
1514                 kvfree(q[JQ_SUBMIT]);
1515                 return;
1516         }
1517         z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
1518 }
1519
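/*
 * Submit all collected pclusters for I/O and then decompress: the bypass
 * queue is handled inline, while the submit queue is either waited on and
 * decompressed here (sync) or left to the decompression workqueue (async).
 */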
1520 static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
1521                              struct page **pagepool, bool force_fg)
1522 {
1523         struct z_erofs_decompressqueue io[NR_JOBQUEUES];
1524
1525         if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
1526                 return;
1527         z_erofs_submit_queue(f, pagepool, io, &force_fg);
1528
1529         /* handle the bypass queue (pclusters that need no I/O) immediately */
1530         z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
1531
1532         if (!force_fg)
1533                 return;
1534
1535         /* wait until all bios are completed */
1536         wait_for_completion_io(&io[JQ_SUBMIT].u.done);
1537
1538         /* handle the synchronous decompression queue in the caller's context */
1539         z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
1540 }
1541
1542 /*
1543  * Since partial uptodate is still unimplemented, we have to use
1544  * approximate readmore strategies for now.
1545  */
1546 static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
1547                                       struct readahead_control *rac,
1548                                       erofs_off_t end,
1549                                       struct page **pagepool,
1550                                       bool backmost)
1551 {
1552         struct inode *inode = f->inode;
1553         struct erofs_map_blocks *map = &f->map;
1554         erofs_off_t cur;
1555         int err;
1556
1557         if (backmost) {
1558                 map->m_la = end;
1559                 err = z_erofs_map_blocks_iter(inode, map,
1560                                               EROFS_GET_BLOCKS_READMORE);
1561                 if (err)
1562                         return;
1563
1564                 /* expand readahead for the trailing edge if in readahead mode */
1565                 if (rac) {
1566                         loff_t newstart = readahead_pos(rac);
1567
1568                         cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
1569                         readahead_expand(rac, newstart, cur - newstart);
1570                         return;
1571                 }
1572                 end = round_up(end, PAGE_SIZE);
1573         } else {
1574                 end = round_up(map->m_la, PAGE_SIZE);
1575
1576                 if (!map->m_llen)
1577                         return;
1578         }
1579
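        /*
         * walk backwards from the end of the mapped extent, feeding each
         * page that is not already uptodate into the decompression frontend.
         */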
1580         cur = map->m_la + map->m_llen - 1;
1581         while (cur >= end) {
1582                 pgoff_t index = cur >> PAGE_SHIFT;
1583                 struct page *page;
1584
1585                 page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
1586                 if (page) {
1587                         if (PageUptodate(page)) {
1588                                 unlock_page(page);
1589                         } else {
1590                                 err = z_erofs_do_read_page(f, page, pagepool);
1591                                 if (err)
1592                                         erofs_err(inode->i_sb,
1593                                                   "readmore error at page %lu @ nid %llu",
1594                                                   index, EROFS_I(inode)->nid);
1595                         }
1596                         put_page(page);
1597                 }
1598
1599                 if (cur < PAGE_SIZE)
1600                         break;
1601                 cur = (index << PAGE_SHIFT) - 1;
1602         }
1603 }
1604
1605 static int z_erofs_read_folio(struct file *file, struct folio *folio)
1606 {
1607         struct page *page = &folio->page;
1608         struct inode *const inode = page->mapping->host;
1609         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1610         struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1611         struct page *pagepool = NULL;
1612         int err;
1613
1614         trace_erofs_readpage(page, false);
1615         f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
1616
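        /* read more compressed data around the requested page, trailing edge first */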
1617         z_erofs_pcluster_readmore(&f, NULL, f.headoffset + PAGE_SIZE - 1,
1618                                   &pagepool, true);
1619         err = z_erofs_do_read_page(&f, page, &pagepool);
1620         z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);
1621
1622         (void)z_erofs_collector_end(&f);
1623
1624         /* if some compressed clusters are ready, they need to be submitted anyway */
1625         z_erofs_runqueue(&f, &pagepool,
1626                          z_erofs_get_sync_decompress_policy(sbi, 0));
1627
1628         if (err)
1629                 erofs_err(inode->i_sb, "failed to read, err [%d]", err);
1630
1631         erofs_put_metabuf(&f.map.buf);
1632         erofs_release_pages(&pagepool);
1633         return err;
1634 }
1635
1636 static void z_erofs_readahead(struct readahead_control *rac)
1637 {
1638         struct inode *const inode = rac->mapping->host;
1639         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1640         struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
1641         struct page *pagepool = NULL, *head = NULL, *page;
1642         unsigned int nr_pages;
1643
1644         f.readahead = true;
1645         f.headoffset = readahead_pos(rac);
1646
1647         z_erofs_pcluster_readmore(&f, rac, f.headoffset +
1648                                   readahead_length(rac) - 1, &pagepool, true);
1649         nr_pages = readahead_count(rac);
1650         trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
1651
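        /* push pages onto a LIFO list via page_private so they are processed below in reverse order */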
1652         while ((page = readahead_page(rac))) {
1653                 set_page_private(page, (unsigned long)head);
1654                 head = page;
1655         }
1656
1657         while (head) {
1658                 struct page *page = head;
1659                 int err;
1660
1661                 /* traverse in reverse order (the chain above is LIFO) */
1662                 head = (void *)page_private(page);
1663
1664                 err = z_erofs_do_read_page(&f, page, &pagepool);
1665                 if (err)
1666                         erofs_err(inode->i_sb,
1667                                   "readahead error at page %lu @ nid %llu",
1668                                   page->index, EROFS_I(inode)->nid);
1669                 put_page(page);
1670         }
1671         z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
1672         (void)z_erofs_collector_end(&f);
1673
1674         z_erofs_runqueue(&f, &pagepool,
1675                          z_erofs_get_sync_decompress_policy(sbi, nr_pages));
1676         erofs_put_metabuf(&f.map.buf);
1677         erofs_release_pages(&pagepool);
1678 }
1679
1680 const struct address_space_operations z_erofs_aops = {
1681         .read_folio = z_erofs_read_folio,
1682         .readahead = z_erofs_readahead,
1683 };