// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

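/*
 * Note: nm_i->build_lock is held for the duration of the free nid
 * build path, so this effectively asks "is a free nid build in
 * flight right now?".
 */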
#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
        if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
                          __func__, nid);
                f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
                return -EFSCORRUPTED;
        }
        return 0;
}

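/*
 * Heuristic caps for the in-memory caches below.  ram_thresh is a
 * tunable percentage of low memory, and the per-type shifts carve that
 * budget further: ">> 2" allows 25% of the budget, ">> 1" 50%.  As an
 * example (assuming ram_thresh=10 and 1GiB of low memory), a ">> 2"
 * type may grow to roughly 1GiB * 10% / 4 = ~25MiB before this
 * function starts returning false for it.
 */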
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
        struct sysinfo val;
        unsigned long avail_ram;
        unsigned long mem_size = 0;
        bool res = false;

        if (!nm_i)
                return true;

        si_meminfo(&val);

        /* only uses low memory */
        avail_ram = val.totalram - val.totalhigh;

        /*
         * give 25%, 25%, 50%, 50%, 25%, 25% of memory to each component,
         * respectively
         */
        if (type == FREE_NIDS) {
                mem_size = (nm_i->nid_cnt[FREE_NID] *
                                sizeof(struct free_nid)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
        } else if (type == NAT_ENTRIES) {
                mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
                                sizeof(struct nat_entry)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
                if (excess_cached_nats(sbi))
                        res = false;
        } else if (type == DIRTY_DENTS) {
                if (sbi->sb->s_bdi->wb.dirty_exceeded)
                        return false;
                mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
        } else if (type == INO_ENTRIES) {
                int i;

                for (i = 0; i < MAX_INO_ENTRY; i++)
                        mem_size += sbi->im[i].ino_num *
                                                sizeof(struct ino_entry);
                mem_size >>= PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
        } else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
                enum extent_type etype = type == READ_EXTENT_CACHE ?
                                                EX_READ : EX_BLOCK_AGE;
                struct extent_tree_info *eti = &sbi->extent_tree[etype];

                mem_size = (atomic_read(&eti->total_ext_tree) *
                                sizeof(struct extent_tree) +
                                atomic_read(&eti->total_ext_node) *
                                sizeof(struct extent_node)) >> PAGE_SHIFT;
                res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
        } else if (type == DISCARD_CACHE) {
                mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
                                sizeof(struct discard_cmd)) >> PAGE_SHIFT;
                res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
        } else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
                unsigned long free_ram = val.freeram;

                /*
                 * if free memory is lower than the watermark or the cached
                 * page count exceeds the threshold, deny caching compressed
                 * pages.
                 */
                res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
                        (COMPRESS_MAPPING(sbi)->nrpages <
                         free_ram * sbi->compress_percent / 100);
#else
                res = false;
#endif
        } else {
                if (!sbi->sb->s_bdi->wb.dirty_exceeded)
                        return true;
        }
        return res;
}

static void clear_node_folio_dirty(struct folio *folio)
{
        if (folio_test_dirty(folio)) {
                f2fs_clear_page_cache_dirty_tag(folio);
                folio_clear_dirty_for_io(folio);
                dec_page_count(F2FS_F_SB(folio), F2FS_DIRTY_NODES);
        }
        folio_clear_uptodate(folio);
}

static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
{
        return f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid));
}

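/*
 * NAT blocks are double buffered on disk: each block has two slots,
 * and a bit in the NAT version bitmap selects the live one.  Copy the
 * current block into the other slot and flip the bit, so the copy
 * referenced by the last checkpoint stays intact until the next
 * checkpoint commits.
 */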
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct folio *src_folio;
        struct folio *dst_folio;
        pgoff_t dst_off;
        void *src_addr;
        void *dst_addr;
        struct f2fs_nm_info *nm_i = NM_I(sbi);

        dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

        /* get current nat block page with lock */
        src_folio = get_current_nat_folio(sbi, nid);
        if (IS_ERR(src_folio))
                return &src_folio->page;
        dst_folio = f2fs_grab_meta_folio(sbi, dst_off);
        f2fs_bug_on(sbi, folio_test_dirty(src_folio));

        src_addr = folio_address(src_folio);
        dst_addr = folio_address(dst_folio);
        memcpy(dst_addr, src_addr, PAGE_SIZE);
        folio_mark_dirty(dst_folio);
        f2fs_folio_put(src_folio, true);

        set_to_next_nat(nm_i, nid);

        return &dst_folio->page;
}

static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
                                                nid_t nid, bool no_fail)
{
        struct nat_entry *new;

        new = f2fs_kmem_cache_alloc(nat_entry_slab,
                                        GFP_F2FS_ZERO, no_fail, sbi);
        if (new) {
                nat_set_nid(new, nid);
                nat_reset_flag(new);
        }
        return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
        kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
        struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
        if (no_fail)
                f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
        else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
                return NULL;

        if (raw_ne)
                node_info_from_raw_nat(&ne->ni, raw_ne);

        spin_lock(&nm_i->nat_list_lock);
        list_add_tail(&ne->list, &nm_i->nat_entries);
        spin_unlock(&nm_i->nat_list_lock);

        nm_i->nat_cnt[TOTAL_NAT]++;
        nm_i->nat_cnt[RECLAIMABLE_NAT]++;
        return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
        struct nat_entry *ne;

        ne = radix_tree_lookup(&nm_i->nat_root, n);

        /*
         * for a recently accessed nat entry, move it to the tail of
         * the LRU list
         */
        if (ne && !get_nat_flag(ne, IS_DIRTY)) {
                spin_lock(&nm_i->nat_list_lock);
                if (!list_empty(&ne->list))
                        list_move_tail(&ne->list, &nm_i->nat_entries);
                spin_unlock(&nm_i->nat_list_lock);
        }

        return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
                nid_t start, unsigned int nr, struct nat_entry **ep)
{
        return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
        radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
        nm_i->nat_cnt[TOTAL_NAT]--;
        nm_i->nat_cnt[RECLAIMABLE_NAT]--;
        __free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
                                                        struct nat_entry *ne)
{
        nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
        struct nat_entry_set *head;

        head = radix_tree_lookup(&nm_i->nat_set_root, set);
        if (!head) {
                head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
                                                GFP_NOFS, true, NULL);

                INIT_LIST_HEAD(&head->entry_list);
                INIT_LIST_HEAD(&head->set_list);
                head->set = set;
                head->entry_cnt = 0;
                f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
        }
        return head;
}

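/*
 * Dirty nat entries are grouped into per-NAT-block sets for writeback.
 * Accounting invariant kept by the helpers below:
 *   nat_cnt[TOTAL_NAT] == nat_cnt[DIRTY_NAT] + nat_cnt[RECLAIMABLE_NAT]
 * Entries still at NEW_ADDR (preallocated, never written back) are
 * parked off-list instead of joining a dirty set.
 */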
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
                                                struct nat_entry *ne)
{
        struct nat_entry_set *head;
        bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

        if (!new_ne)
                head = __grab_nat_entry_set(nm_i, ne);

        /*
         * update entry_cnt under the below conditions:
         * 1. updating NEW_ADDR to a valid block address;
         * 2. updating an old block address to a new one;
         */
        if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
                                !get_nat_flag(ne, IS_DIRTY)))
                head->entry_cnt++;

        set_nat_flag(ne, IS_PREALLOC, new_ne);

        if (get_nat_flag(ne, IS_DIRTY))
                goto refresh_list;

        nm_i->nat_cnt[DIRTY_NAT]++;
        nm_i->nat_cnt[RECLAIMABLE_NAT]--;
        set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
        spin_lock(&nm_i->nat_list_lock);
        if (new_ne)
                list_del_init(&ne->list);
        else
                list_move_tail(&ne->list, &head->entry_list);
        spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
                struct nat_entry_set *set, struct nat_entry *ne)
{
        spin_lock(&nm_i->nat_list_lock);
        list_move_tail(&ne->list, &nm_i->nat_entries);
        spin_unlock(&nm_i->nat_list_lock);

        set_nat_flag(ne, IS_DIRTY, false);
        set->entry_cnt--;
        nm_i->nat_cnt[DIRTY_NAT]--;
        nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
                nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
        return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
                                                        start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio)
{
        return is_node_folio(folio) && IS_DNODE(&folio->page) &&
                                        is_cold_node(&folio->page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
        spin_lock_init(&sbi->fsync_node_lock);
        INIT_LIST_HEAD(&sbi->fsync_node_list);
        sbi->fsync_seg_id = 0;
        sbi->fsync_node_num = 0;
}

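/*
 * Node folios written out by fsync are tracked on fsync_node_list so
 * the fsync path can wait for their writeback in submission order;
 * the returned seq_id identifies this entry within that order.
 */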
static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
                struct folio *folio)
{
        struct fsync_node_entry *fn;
        unsigned long flags;
        unsigned int seq_id;

        fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
                                        GFP_NOFS, true, NULL);

        folio_get(folio);
        fn->folio = folio;
        INIT_LIST_HEAD(&fn->list);

        spin_lock_irqsave(&sbi->fsync_node_lock, flags);
        list_add_tail(&fn->list, &sbi->fsync_node_list);
        fn->seq_id = sbi->fsync_seg_id++;
        seq_id = fn->seq_id;
        sbi->fsync_node_num++;
        spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

        return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio)
{
        struct fsync_node_entry *fn;
        unsigned long flags;

        spin_lock_irqsave(&sbi->fsync_node_lock, flags);
        list_for_each_entry(fn, &sbi->fsync_node_list, list) {
                if (fn->folio == folio) {
                        list_del(&fn->list);
                        sbi->fsync_node_num--;
                        spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
                        kmem_cache_free(fsync_node_entry_slab, fn);
                        folio_put(folio);
                        return;
                }
        }
        spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
        f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
        unsigned long flags;

        spin_lock_irqsave(&sbi->fsync_node_lock, flags);
        sbi->fsync_seg_id = 0;
        spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
        bool need = false;

        f2fs_down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e) {
                if (!get_nat_flag(e, IS_CHECKPOINTED) &&
                                !get_nat_flag(e, HAS_FSYNCED_INODE))
                        need = true;
        }
        f2fs_up_read(&nm_i->nat_tree_lock);
        return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
        bool is_cp = true;

        f2fs_down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e && !get_nat_flag(e, IS_CHECKPOINTED))
                is_cp = false;
        f2fs_up_read(&nm_i->nat_tree_lock);
        return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
        bool need_update = true;

        f2fs_down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, ino);
        if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
                        (get_nat_flag(e, IS_CHECKPOINTED) ||
                         get_nat_flag(e, HAS_FSYNCED_INODE)))
                need_update = false;
        f2fs_up_read(&nm_i->nat_tree_lock);
        return need_update;
}

/* nat_tree_lock is taken and released internally */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
                                                struct f2fs_nat_entry *ne)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *new, *e;

        /* Let's mitigate lock contention of nat_tree_lock during checkpoint */
        if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
                return;

        new = __alloc_nat_entry(sbi, nid, false);
        if (!new)
                return;

        f2fs_down_write(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (!e)
                e = __init_nat_entry(nm_i, new, ne, false);
        else
                f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
                                nat_get_blkaddr(e) !=
                                        le32_to_cpu(ne->block_addr) ||
                                nat_get_version(e) != ne->version);
        f2fs_up_write(&nm_i->nat_tree_lock);
        if (e != new)
                __free_nat_entry(new);
}

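/*
 * Point the cached nat entry for @ni->nid at @new_blkaddr and mark it
 * dirty.  The transitions the sanity checks below allow are, roughly:
 *   NULL_ADDR -> NEW_ADDR        (node preallocated in memory)
 *   NEW_ADDR  -> valid blkaddr   (first writeback)
 *   valid     -> valid blkaddr   (rewrite)
 *   any       -> NULL_ADDR       (node freed; the version is bumped
 *                                 once the node had a real address)
 */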
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
                        block_t new_blkaddr, bool fsync_done)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
        struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);

        f2fs_down_write(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, ni->nid);
        if (!e) {
                e = __init_nat_entry(nm_i, new, NULL, true);
                copy_node_info(&e->ni, ni);
                f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
        } else if (new_blkaddr == NEW_ADDR) {
                /*
                 * when a nid is reallocated, the previous nat entry can
                 * remain in the nat cache, so reinitialize it with new
                 * information.
                 */
                copy_node_info(&e->ni, ni);
                f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
        }
        /* let's free early to reduce memory consumption */
        if (e != new)
                __free_nat_entry(new);

        /* sanity check */
        f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
        f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
                        new_blkaddr == NULL_ADDR);
        f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
                        new_blkaddr == NEW_ADDR);
        f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
                        new_blkaddr == NEW_ADDR);

        /* increment version no as node is removed */
        if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
                unsigned char version = nat_get_version(e);

                nat_set_version(e, inc_node_version(version));
        }

        /* change address */
        nat_set_blkaddr(e, new_blkaddr);
        if (!__is_valid_data_blkaddr(new_blkaddr))
                set_nat_flag(e, IS_CHECKPOINTED, false);
        __set_nat_cache_dirty(nm_i, e);

        /* update fsync_mark if its inode nat entry is still alive */
        if (ni->nid != ni->ino)
                e = __lookup_nat_cache(nm_i, ni->ino);
        if (e) {
                if (fsync_done && ni->nid == ni->ino)
                        set_nat_flag(e, HAS_FSYNCED_INODE, true);
                set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
        }
        f2fs_up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        int nr = nr_shrink;

        if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
                return 0;

        spin_lock(&nm_i->nat_list_lock);
        while (nr_shrink) {
                struct nat_entry *ne;

                if (list_empty(&nm_i->nat_entries))
                        break;

                ne = list_first_entry(&nm_i->nat_entries,
                                        struct nat_entry, list);
                list_del(&ne->list);
                spin_unlock(&nm_i->nat_list_lock);

                __del_from_nat_cache(nm_i, ne);
                nr_shrink--;

                spin_lock(&nm_i->nat_list_lock);
        }
        spin_unlock(&nm_i->nat_list_lock);

        f2fs_up_write(&nm_i->nat_tree_lock);
        return nr - nr_shrink;
}

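/*
 * Resolve nid -> node_info by checking, in order: the in-memory nat
 * cache, the NAT journal in the hot data curseg summary, and finally
 * the on-disk NAT block.  The result is cached afterwards unless a
 * checkpoint is in flight (see cache_nat_entry()).
 */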
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
                                struct node_info *ni, bool checkpoint_context)
{
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
        struct f2fs_journal *journal = curseg->journal;
        nid_t start_nid = START_NID(nid);
        struct f2fs_nat_block *nat_blk;
        struct folio *folio = NULL;
        struct f2fs_nat_entry ne;
        struct nat_entry *e;
        pgoff_t index;
        block_t blkaddr;
        int i;

        ni->flag = 0;
        ni->nid = nid;
retry:
        /* Check nat cache */
        f2fs_down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e) {
                ni->ino = nat_get_ino(e);
                ni->blk_addr = nat_get_blkaddr(e);
                ni->version = nat_get_version(e);
                f2fs_up_read(&nm_i->nat_tree_lock);
                return 0;
        }

        /*
         * Check the current segment summary by trying to grab journal_rwsem
         * first.  This rwsem is on the checkpoint's critical path, which also
         * requires the above nat_tree_lock; therefore, if we fail to grab it
         * here, retry without bothering the checkpoint.
         */
        if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
                down_read(&curseg->journal_rwsem);
        } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
                                !down_read_trylock(&curseg->journal_rwsem)) {
                f2fs_up_read(&nm_i->nat_tree_lock);
                goto retry;
        }

        i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
        if (i >= 0) {
                ne = nat_in_journal(journal, i);
                node_info_from_raw_nat(ni, &ne);
        }
        up_read(&curseg->journal_rwsem);
        if (i >= 0) {
                f2fs_up_read(&nm_i->nat_tree_lock);
                goto cache;
        }

        /* Fill node_info from nat page */
        index = current_nat_addr(sbi, nid);
        f2fs_up_read(&nm_i->nat_tree_lock);

        folio = f2fs_get_meta_folio(sbi, index);
        if (IS_ERR(folio))
                return PTR_ERR(folio);

        nat_blk = folio_address(folio);
        ne = nat_blk->entries[nid - start_nid];
        node_info_from_raw_nat(ni, &ne);
        f2fs_folio_put(folio, true);
cache:
        blkaddr = le32_to_cpu(ne.block_addr);
        if (__is_valid_data_blkaddr(blkaddr) &&
                !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
                return -EFAULT;

        /* cache nat entry */
        cache_nat_entry(sbi, nid, &ne);
        return 0;
}

/*
 * readahead up to @n sibling node pages of @parent, starting at
 * index @start (MAX_RA_NODE at most on the lookup path).
 */
static void f2fs_ra_node_pages(struct folio *parent, int start, int n)
{
        struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
        struct blk_plug plug;
        int i, end;
        nid_t nid;

        blk_start_plug(&plug);

        /* try readahead for siblings of the desired node */
        end = start + n;
        end = min(end, (int)NIDS_PER_BLOCK);
        for (i = start; i < end; i++) {
                nid = get_nid(&parent->page, i, false);
                f2fs_ra_node_page(sbi, nid);
        }

        blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
        const long direct_index = ADDRS_PER_INODE(dn->inode);
        const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
        const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
        unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
        int cur_level = dn->cur_level;
        int max_level = dn->max_level;
        pgoff_t base = 0;

        if (!dn->max_level)
                return pgofs + 1;

        while (max_level-- > cur_level)
                skipped_unit *= NIDS_PER_BLOCK;

        switch (dn->max_level) {
        case 3:
                base += 2 * indirect_blks;
                fallthrough;
        case 2:
                base += 2 * direct_blks;
                fallthrough;
        case 1:
                base += direct_index;
                break;
        default:
                f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
        }

        return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
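/*
 * Worked example (a sketch; the exact boundaries depend on
 * ADDRS_PER_INODE(), which shrinks with inline xattr/extra attrs).
 * Assuming ADDRS_PER_INODE() == 923 and
 * ADDRS_PER_BLOCK() == NIDS_PER_BLOCK == 1018:
 *   block 0 .. 922        -> in the inode itself, level 0
 *   block 923 .. 1940     -> NODE_DIR1_BLOCK, level 1
 *   block 1941 .. 2958    -> NODE_DIR2_BLOCK, level 1
 *   block 2959 .. 1039282 -> NODE_IND1_BLOCK, level 2
 *   next 1018*1018 blocks -> NODE_IND2_BLOCK, level 2
 *   up to 1018^3 more     -> NODE_DIND_BLOCK, level 3
 */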
static int get_node_path(struct inode *inode, long block,
                                int offset[4], unsigned int noffset[4])
{
        const long direct_index = ADDRS_PER_INODE(inode);
        const long direct_blks = ADDRS_PER_BLOCK(inode);
        const long dptrs_per_blk = NIDS_PER_BLOCK;
        const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
        const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
        int n = 0;
        int level = 0;

        noffset[0] = 0;

        if (block < direct_index) {
                offset[n] = block;
                goto got;
        }
        block -= direct_index;
        if (block < direct_blks) {
                offset[n++] = NODE_DIR1_BLOCK;
                noffset[n] = 1;
                offset[n] = block;
                level = 1;
                goto got;
        }
        block -= direct_blks;
        if (block < direct_blks) {
                offset[n++] = NODE_DIR2_BLOCK;
                noffset[n] = 2;
                offset[n] = block;
                level = 1;
                goto got;
        }
        block -= direct_blks;
        if (block < indirect_blks) {
                offset[n++] = NODE_IND1_BLOCK;
                noffset[n] = 3;
                offset[n++] = block / direct_blks;
                noffset[n] = 4 + offset[n - 1];
                offset[n] = block % direct_blks;
                level = 2;
                goto got;
        }
        block -= indirect_blks;
        if (block < indirect_blks) {
                offset[n++] = NODE_IND2_BLOCK;
                noffset[n] = 4 + dptrs_per_blk;
                offset[n++] = block / direct_blks;
                noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
                offset[n] = block % direct_blks;
                level = 2;
                goto got;
        }
        block -= indirect_blks;
        if (block < dindirect_blks) {
                offset[n++] = NODE_DIND_BLOCK;
                noffset[n] = 5 + (dptrs_per_blk * 2);
                offset[n++] = block / indirect_blks;
                noffset[n] = 6 + (dptrs_per_blk * 2) +
                              offset[n - 1] * (dptrs_per_blk + 1);
                offset[n++] = (block / direct_blks) % dptrs_per_blk;
                noffset[n] = 7 + (dptrs_per_blk * 2) +
                              offset[n - 2] * (dptrs_per_blk + 1) +
                              offset[n - 1];
                offset[n] = block % direct_blks;
                level = 3;
                goto got;
        } else {
                return -E2BIG;
        }
got:
        return level;
}

static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start);

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct folio *nfolio[4];
        struct folio *parent = NULL;
        int offset[4];
        unsigned int noffset[4];
        nid_t nids[4];
        int level, i = 0;
        int err = 0;

        level = get_node_path(dn->inode, index, offset, noffset);
        if (level < 0)
                return level;

        nids[0] = dn->inode->i_ino;

        if (!dn->inode_folio) {
                nfolio[0] = f2fs_get_inode_folio(sbi, nids[0]);
                if (IS_ERR(nfolio[0]))
                        return PTR_ERR(nfolio[0]);
        } else {
                nfolio[0] = dn->inode_folio;
        }

        /* if inline_data is set, should not report any block indices */
        if (f2fs_has_inline_data(dn->inode) && index) {
                err = -ENOENT;
                f2fs_folio_put(nfolio[0], true);
                goto release_out;
        }

        parent = nfolio[0];
        if (level != 0)
                nids[1] = get_nid(&parent->page, offset[0], true);
        dn->inode_folio = nfolio[0];
        dn->inode_folio_locked = true;

        /* get indirect or direct nodes */
        for (i = 1; i <= level; i++) {
                bool done = false;

                if (!nids[i] && mode == ALLOC_NODE) {
                        /* alloc new node */
                        if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
                                err = -ENOSPC;
                                goto release_pages;
                        }

                        dn->nid = nids[i];
                        nfolio[i] = f2fs_new_node_folio(dn, noffset[i]);
                        if (IS_ERR(nfolio[i])) {
                                f2fs_alloc_nid_failed(sbi, nids[i]);
                                err = PTR_ERR(nfolio[i]);
                                goto release_pages;
                        }

                        set_nid(parent, offset[i - 1], nids[i], i == 1);
                        f2fs_alloc_nid_done(sbi, nids[i]);
                        done = true;
                } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
                        nfolio[i] = f2fs_get_node_folio_ra(parent, offset[i - 1]);
                        if (IS_ERR(nfolio[i])) {
                                err = PTR_ERR(nfolio[i]);
                                goto release_pages;
                        }
                        done = true;
                }
                if (i == 1) {
                        dn->inode_folio_locked = false;
                        folio_unlock(parent);
                } else {
                        f2fs_folio_put(parent, true);
                }

                if (!done) {
                        nfolio[i] = f2fs_get_node_folio(sbi, nids[i]);
                        if (IS_ERR(nfolio[i])) {
                                err = PTR_ERR(nfolio[i]);
                                f2fs_folio_put(nfolio[0], false);
                                goto release_out;
                        }
                }
                if (i < level) {
                        parent = nfolio[i];
                        nids[i + 1] = get_nid(&parent->page, offset[i], false);
                }
        }
        dn->nid = nids[level];
        dn->ofs_in_node = offset[level];
        dn->node_folio = nfolio[level];
        dn->data_blkaddr = f2fs_data_blkaddr(dn);

        if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
                                        f2fs_sb_has_readonly(sbi)) {
                unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
                unsigned int ofs_in_node = dn->ofs_in_node;
                pgoff_t fofs = index;
                unsigned int c_len;
                block_t blkaddr;

                /* should align fofs and ofs_in_node to cluster_size */
                if (fofs % cluster_size) {
                        fofs = round_down(fofs, cluster_size);
                        ofs_in_node = round_down(ofs_in_node, cluster_size);
                }

                c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
                if (!c_len)
                        goto out;

                blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node);
                if (blkaddr == COMPRESS_ADDR)
                        blkaddr = data_blkaddr(dn->inode, dn->node_folio,
                                                ofs_in_node + 1);

                f2fs_update_read_extent_tree_range_compressed(dn->inode,
                                        fofs, blkaddr, cluster_size, c_len);
        }
out:
        return 0;

release_pages:
        f2fs_folio_put(parent, true);
        if (i > 1)
                f2fs_folio_put(nfolio[0], false);
release_out:
        dn->inode_folio = NULL;
        dn->node_folio = NULL;
        if (err == -ENOENT) {
                dn->cur_level = i;
                dn->max_level = level;
                dn->ofs_in_node = offset[level];
        }
        return err;
}

static int truncate_node(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct node_info ni;
        int err;
        pgoff_t index;

        err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
        if (err)
                return err;

        if (ni.blk_addr != NEW_ADDR &&
                !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) {
                f2fs_err_ratelimited(sbi,
                        "nat entry is corrupted, run fsck to fix it, ino:%u, "
                        "nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
                return -EFSCORRUPTED;
        }

        /* Deallocate node address */
        f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
        dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
        set_node_addr(sbi, &ni, NULL_ADDR, false);

        if (dn->nid == dn->inode->i_ino) {
                f2fs_remove_orphan_inode(sbi, dn->nid);
                dec_valid_inode_count(sbi);
                f2fs_inode_synced(dn->inode);
        }

        clear_node_folio_dirty(dn->node_folio);
        set_sbi_flag(sbi, SBI_IS_DIRTY);

        index = dn->node_folio->index;
        f2fs_folio_put(dn->node_folio, true);

        invalidate_mapping_pages(NODE_MAPPING(sbi),
                        index, index);

        dn->node_folio = NULL;
        trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

        return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct folio *folio;
        int err;

        if (dn->nid == 0)
                return 1;

        /* get direct node */
        folio = f2fs_get_node_folio(sbi, dn->nid);
        if (PTR_ERR(folio) == -ENOENT)
                return 1;
        else if (IS_ERR(folio))
                return PTR_ERR(folio);

        if (IS_INODE(&folio->page) || ino_of_node(&folio->page) != dn->inode->i_ino) {
                f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
                                dn->inode->i_ino, dn->nid, ino_of_node(&folio->page));
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
                f2fs_folio_put(folio, true);
                return -EFSCORRUPTED;
        }

        /* Make dnode_of_data for parameter */
        dn->node_folio = folio;
        dn->ofs_in_node = 0;
        f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
        err = truncate_node(dn);
        if (err) {
                f2fs_folio_put(folio, true);
                return err;
        }

        return 1;
}

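/*
 * Recursively free the subtree below dn->nid.  The return value is,
 * roughly, the number of node slots consumed below this point (an
 * absent or fully freed (in)direct node counts as NIDS_PER_BLOCK + 1),
 * which the caller uses to advance its node-offset cursor.
 */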
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
                                                int ofs, int depth)
{
        struct dnode_of_data rdn = *dn;
        struct folio *folio;
        struct f2fs_node *rn;
        nid_t child_nid;
        unsigned int child_nofs;
        int freed = 0;
        int i, ret;

        if (dn->nid == 0)
                return NIDS_PER_BLOCK + 1;

        trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

        folio = f2fs_get_node_folio(F2FS_I_SB(dn->inode), dn->nid);
        if (IS_ERR(folio)) {
                trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(folio));
                return PTR_ERR(folio);
        }

        f2fs_ra_node_pages(folio, ofs, NIDS_PER_BLOCK);

        rn = F2FS_NODE(&folio->page);
        if (depth < 3) {
                for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
                        child_nid = le32_to_cpu(rn->in.nid[i]);
                        if (child_nid == 0)
                                continue;
                        rdn.nid = child_nid;
                        ret = truncate_dnode(&rdn);
                        if (ret < 0)
                                goto out_err;
                        if (set_nid(folio, i, 0, false))
                                dn->node_changed = true;
                }
        } else {
                child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
                for (i = ofs; i < NIDS_PER_BLOCK; i++) {
                        child_nid = le32_to_cpu(rn->in.nid[i]);
                        if (child_nid == 0) {
                                child_nofs += NIDS_PER_BLOCK + 1;
                                continue;
                        }
                        rdn.nid = child_nid;
                        ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
                        if (ret == (NIDS_PER_BLOCK + 1)) {
                                if (set_nid(folio, i, 0, false))
                                        dn->node_changed = true;
                                child_nofs += ret;
                        } else if (ret < 0 && ret != -ENOENT) {
                                goto out_err;
                        }
                }
                freed = child_nofs;
        }

        if (!ofs) {
                /* remove current indirect node */
                dn->node_folio = folio;
                ret = truncate_node(dn);
                if (ret)
                        goto out_err;
                freed++;
        } else {
                f2fs_folio_put(folio, true);
        }
        trace_f2fs_truncate_nodes_exit(dn->inode, freed);
        return freed;

out_err:
        f2fs_folio_put(folio, true);
        trace_f2fs_truncate_nodes_exit(dn->inode, ret);
        return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
                        struct f2fs_inode *ri, int *offset, int depth)
{
        struct folio *folios[2];
        nid_t nid[3];
        nid_t child_nid;
        int err = 0;
        int i;
        int idx = depth - 2;

        nid[0] = get_nid(&dn->inode_folio->page, offset[0], true);
        if (!nid[0])
                return 0;

        /* get indirect nodes in the path */
        for (i = 0; i < idx + 1; i++) {
                /* reference count'll be increased */
                folios[i] = f2fs_get_node_folio(F2FS_I_SB(dn->inode), nid[i]);
                if (IS_ERR(folios[i])) {
                        err = PTR_ERR(folios[i]);
                        idx = i - 1;
                        goto fail;
                }
                nid[i + 1] = get_nid(&folios[i]->page, offset[i + 1], false);
        }

        f2fs_ra_node_pages(folios[idx], offset[idx + 1], NIDS_PER_BLOCK);

        /* free direct nodes linked to a partial indirect node */
        for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
                child_nid = get_nid(&folios[idx]->page, i, false);
                if (!child_nid)
                        continue;
                dn->nid = child_nid;
                err = truncate_dnode(dn);
                if (err < 0)
                        goto fail;
                if (set_nid(folios[idx], i, 0, false))
                        dn->node_changed = true;
        }

        if (offset[idx + 1] == 0) {
                dn->node_folio = folios[idx];
                dn->nid = nid[idx];
                err = truncate_node(dn);
                if (err)
                        goto fail;
        } else {
                f2fs_folio_put(folios[idx], true);
        }
        offset[idx]++;
        offset[idx + 1] = 0;
        idx--;
fail:
        for (i = idx; i >= 0; i--)
                f2fs_folio_put(folios[i], true);

        trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

        return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
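/*
 * Truncation runs in two passes: truncate_partial_nodes() trims the
 * partially covered branch at the 'from' boundary first, then the
 * skip_partial loop below drops every later branch whole.
 */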
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int err = 0, cont = 1;
        int level, offset[4], noffset[4];
        unsigned int nofs = 0;
        struct f2fs_inode *ri;
        struct dnode_of_data dn;
        struct folio *folio;

        trace_f2fs_truncate_inode_blocks_enter(inode, from);

        level = get_node_path(inode, from, offset, noffset);
        if (level <= 0) {
                if (!level) {
                        level = -EFSCORRUPTED;
                        f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u",
                                        __func__, inode->i_ino,
                                        from, ADDRS_PER_INODE(inode));
                        set_sbi_flag(sbi, SBI_NEED_FSCK);
                }
                trace_f2fs_truncate_inode_blocks_exit(inode, level);
                return level;
        }

        folio = f2fs_get_inode_folio(sbi, inode->i_ino);
        if (IS_ERR(folio)) {
                trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(folio));
                return PTR_ERR(folio);
        }

        set_new_dnode(&dn, inode, folio, NULL, 0);
        folio_unlock(folio);

        ri = F2FS_INODE(&folio->page);
        switch (level) {
        case 0:
        case 1:
                nofs = noffset[1];
                break;
        case 2:
                nofs = noffset[1];
                if (!offset[level - 1])
                        goto skip_partial;
                err = truncate_partial_nodes(&dn, ri, offset, level);
                if (err < 0 && err != -ENOENT)
                        goto fail;
                nofs += 1 + NIDS_PER_BLOCK;
                break;
        case 3:
                nofs = 5 + 2 * NIDS_PER_BLOCK;
                if (!offset[level - 1])
                        goto skip_partial;
                err = truncate_partial_nodes(&dn, ri, offset, level);
                if (err < 0 && err != -ENOENT)
                        goto fail;
                break;
        default:
                BUG();
        }

skip_partial:
        while (cont) {
                dn.nid = get_nid(&folio->page, offset[0], true);
                switch (offset[0]) {
                case NODE_DIR1_BLOCK:
                case NODE_DIR2_BLOCK:
                        err = truncate_dnode(&dn);
                        break;

                case NODE_IND1_BLOCK:
                case NODE_IND2_BLOCK:
                        err = truncate_nodes(&dn, nofs, offset[1], 2);
                        break;

                case NODE_DIND_BLOCK:
                        err = truncate_nodes(&dn, nofs, offset[1], 3);
                        cont = 0;
                        break;

                default:
                        BUG();
                }
                if (err == -ENOENT) {
                        set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK);
                        f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
                        f2fs_err_ratelimited(sbi,
                                "truncate node fail, ino:%lu, nid:%u, "
                                "offset[0]:%d, offset[1]:%d, nofs:%d",
                                inode->i_ino, dn.nid, offset[0],
                                offset[1], nofs);
                        err = 0;
                }
                if (err < 0)
                        goto fail;
                if (offset[1] == 0 && get_nid(&folio->page, offset[0], true)) {
                        folio_lock(folio);
                        BUG_ON(!is_node_folio(folio));
                        set_nid(folio, offset[0], 0, true);
                        folio_unlock(folio);
                }
                offset[1] = 0;
                offset[0]++;
                nofs += err;
        }
fail:
        f2fs_folio_put(folio, false);
        trace_f2fs_truncate_inode_blocks_exit(inode, err);
        return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        nid_t nid = F2FS_I(inode)->i_xattr_nid;
        struct dnode_of_data dn;
        struct folio *nfolio;
        int err;

        if (!nid)
                return 0;

        nfolio = f2fs_get_xnode_folio(sbi, nid);
        if (IS_ERR(nfolio))
                return PTR_ERR(nfolio);

        set_new_dnode(&dn, inode, NULL, nfolio, nid);
        err = truncate_node(&dn);
        if (err) {
                f2fs_folio_put(nfolio, true);
                return err;
        }

        f2fs_i_xnid_write(inode, 0);

        return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
        struct dnode_of_data dn;
        int err;

        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
        err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
        if (err)
                return err;

        err = f2fs_truncate_xattr_node(inode);
        if (err) {
                f2fs_put_dnode(&dn);
                return err;
        }

        /* remove potential inline_data blocks */
        if (!IS_DEVICE_ALIASING(inode) &&
            (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
             S_ISLNK(inode->i_mode)))
                f2fs_truncate_data_blocks_range(&dn, 1);

        /* 0 is possible, after f2fs_new_inode() has failed */
        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
                f2fs_put_dnode(&dn);
                return -EIO;
        }

        if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
                f2fs_warn(F2FS_I_SB(inode),
                        "f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
                        inode->i_ino, (unsigned long long)inode->i_blocks);
                set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
        }

        /* will put inode & node pages */
        err = truncate_node(&dn);
        if (err) {
                f2fs_put_dnode(&dn);
                return err;
        }
        return 0;
}

struct folio *f2fs_new_inode_folio(struct inode *inode)
{
        struct dnode_of_data dn;

        /* allocate inode page for new inode */
        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

        /* caller should f2fs_folio_put(folio, true); */
        return f2fs_new_node_folio(&dn, 0);
}

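/*
 * Create the in-memory folio for a brand-new node: @ofs is the node
 * offset within the inode (0 means the inode block itself).  The nat
 * entry is set to NEW_ADDR here; a real block address is only
 * assigned at writeback time.
 */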
struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct node_info new_ni;
        struct folio *folio;
        int err;

        if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return ERR_PTR(-EPERM);

        folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), dn->nid, false);
        if (IS_ERR(folio))
                return folio;

        if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
                goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
        err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
        if (err) {
                dec_valid_node_count(sbi, dn->inode, !ofs);
                goto fail;
        }
        if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
                err = -EFSCORRUPTED;
                dec_valid_node_count(sbi, dn->inode, !ofs);
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_warn_ratelimited(sbi,
                        "f2fs_new_node_folio: inconsistent nat entry, "
                        "ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
                        new_ni.ino, new_ni.nid, new_ni.blk_addr,
                        new_ni.version, new_ni.flag);
                f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
                goto fail;
        }
#endif
        new_ni.nid = dn->nid;
        new_ni.ino = dn->inode->i_ino;
        new_ni.blk_addr = NULL_ADDR;
        new_ni.flag = 0;
        new_ni.version = 0;
        set_node_addr(sbi, &new_ni, NEW_ADDR, false);

        f2fs_folio_wait_writeback(folio, NODE, true, true);
        fill_node_footer(&folio->page, dn->nid, dn->inode->i_ino, ofs, true);
        set_cold_node(&folio->page, S_ISDIR(dn->inode->i_mode));
        if (!folio_test_uptodate(folio))
                folio_mark_uptodate(folio);
        if (folio_mark_dirty(folio))
                dn->node_changed = true;

        if (f2fs_has_xattr_block(ofs))
                f2fs_i_xnid_write(dn->inode, dn->nid);

        if (ofs == 0)
                inc_valid_inode_count(sbi);
        return folio;
fail:
        clear_node_folio_dirty(folio);
        f2fs_folio_put(folio, true);
        return ERR_PTR(err);
}

/*
 * The caller should put the folio according to the return value:
 * 0: f2fs_folio_put(folio, false)
 * LOCKED_PAGE or error: f2fs_folio_put(folio, true)
 */
static int read_node_folio(struct folio *folio, blk_opf_t op_flags)
{
        struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
        struct node_info ni;
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = NODE,
                .op = REQ_OP_READ,
                .op_flags = op_flags,
                .page = &folio->page,
                .encrypted_page = NULL,
        };
        int err;

        if (folio_test_uptodate(folio)) {
                if (!f2fs_inode_chksum_verify(sbi, folio)) {
                        folio_clear_uptodate(folio);
                        return -EFSBADCRC;
                }
                return LOCKED_PAGE;
        }

        err = f2fs_get_node_info(sbi, folio->index, &ni, false);
        if (err)
                return err;

        /* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
        if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
                folio_clear_uptodate(folio);
                return -ENOENT;
        }

        fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

        err = f2fs_submit_page_bio(&fio);

        if (!err)
                f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);

        return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
        struct folio *afolio;
        int err;

        if (!nid)
                return;
        if (f2fs_check_nid_range(sbi, nid))
                return;

        afolio = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
        if (afolio)
                return;

        afolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
        if (IS_ERR(afolio))
                return;

        err = read_node_folio(afolio, REQ_RAHEAD);
        f2fs_folio_put(afolio, err ? true : false);
}

static int sanity_check_node_footer(struct f2fs_sb_info *sbi,
                                        struct folio *folio, pgoff_t nid,
                                        enum node_type ntype)
{
        struct page *page = &folio->page;

        if (unlikely(nid != nid_of_node(page) ||
                (ntype == NODE_TYPE_INODE && !IS_INODE(page)) ||
                (ntype == NODE_TYPE_XATTR &&
                !f2fs_has_xattr_block(ofs_of_node(page))) ||
                time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))) {
                f2fs_warn(sbi, "inconsistent node block, node_type:%d, nid:%lu, "
                          "node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
                          ntype, nid, nid_of_node(page), ino_of_node(page),
                          ofs_of_node(page), cpver_of_node(page),
                          next_blkaddr_of_node(folio));
                set_sbi_flag(sbi, SBI_NEED_FSCK);
                f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
                return -EFSCORRUPTED;
        }
        return 0;
}

1484 static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
1485                 struct folio *parent, int start, enum node_type ntype)
1486 {
1487         struct folio *folio;
1488         int err;
1489
1490         if (!nid)
1491                 return ERR_PTR(-ENOENT);
1492         if (f2fs_check_nid_range(sbi, nid))
1493                 return ERR_PTR(-EINVAL);
1494 repeat:
1495         folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
1496         if (IS_ERR(folio))
1497                 return folio;
1498
1499         err = read_node_folio(folio, 0);
1500         if (err < 0)
1501                 goto out_put_err;
1502         if (err == LOCKED_PAGE)
1503                 goto page_hit;
1504
1505         if (parent)
1506                 f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
1507
1508         folio_lock(folio);
1509
1510         if (unlikely(!is_node_folio(folio))) {
1511                 f2fs_folio_put(folio, true);
1512                 goto repeat;
1513         }
1514
1515         if (unlikely(!folio_test_uptodate(folio))) {
1516                 err = -EIO;
1517                 goto out_err;
1518         }
1519
1520         if (!f2fs_inode_chksum_verify(sbi, folio)) {
1521                 err = -EFSBADCRC;
1522                 goto out_err;
1523         }
1524 page_hit:
1525         err = sanity_check_node_footer(sbi, folio, nid, ntype);
1526         if (!err)
1527                 return folio;
1528 out_err:
1529         folio_clear_uptodate(folio);
1530 out_put_err:
1531         /* -ENOENT from read_node_folio is expected, so skip the EIO handling. */
1532         if (err != -ENOENT)
1533                 f2fs_handle_page_eio(sbi, folio, NODE);
1534         f2fs_folio_put(folio, true);
1535         return ERR_PTR(err);
1536 }
1537
1538 struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid)
1539 {
1540         return __get_node_folio(sbi, nid, NULL, 0, NODE_TYPE_REGULAR);
1541 }
1542
1543 struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino)
1544 {
1545         return __get_node_folio(sbi, ino, NULL, 0, NODE_TYPE_INODE);
1546 }
1547
1548 struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid)
1549 {
1550         return __get_node_folio(sbi, xnid, NULL, 0, NODE_TYPE_XATTR);
1551 }
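
/*
 * The three wrappers above differ only in the node_type they pass, which
 * sanity_check_node_footer() uses to reject a node whose footer does not
 * match the caller's expectation. For example (a sketch, assuming the
 * inode carries a valid i_xattr_nid):
 *
 *	xfolio = f2fs_get_xnode_folio(sbi, F2FS_I(inode)->i_xattr_nid);
 *	if (IS_ERR(xfolio))
 *		return PTR_ERR(xfolio);
 */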
1552
1553 static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start)
1554 {
1555         struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
1556         nid_t nid = get_nid(&parent->page, start, false);
1557
1558         return __get_node_folio(sbi, nid, parent, start, NODE_TYPE_REGULAR);
1559 }
1560
1561 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1562 {
1563         struct inode *inode;
1564         struct folio *folio;
1565         int ret;
1566
1567         /* should flush inline_data before evict_inode */
1568         inode = ilookup(sbi->sb, ino);
1569         if (!inode)
1570                 return;
1571
1572         folio = f2fs_filemap_get_folio(inode->i_mapping, 0,
1573                                         FGP_LOCK|FGP_NOWAIT, 0);
1574         if (IS_ERR(folio))
1575                 goto iput_out;
1576
1577         if (!folio_test_uptodate(folio))
1578                 goto folio_out;
1579
1580         if (!folio_test_dirty(folio))
1581                 goto folio_out;
1582
1583         if (!folio_clear_dirty_for_io(folio))
1584                 goto folio_out;
1585
1586         ret = f2fs_write_inline_data(inode, folio);
1587         inode_dec_dirty_pages(inode);
1588         f2fs_remove_dirty_inode(inode);
1589         if (ret)
1590                 folio_mark_dirty(folio);
1591 folio_out:
1592         f2fs_folio_put(folio, true);
1593 iput_out:
1594         iput(inode);
1595 }
1596
1597 static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1598 {
1599         pgoff_t index;
1600         struct folio_batch fbatch;
1601         struct folio *last_folio = NULL;
1602         int nr_folios;
1603
1604         folio_batch_init(&fbatch);
1605         index = 0;
1606
1607         while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1608                                         (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1609                                         &fbatch))) {
1610                 int i;
1611
1612                 for (i = 0; i < nr_folios; i++) {
1613                         struct folio *folio = fbatch.folios[i];
1614
1615                         if (unlikely(f2fs_cp_error(sbi))) {
1616                                 f2fs_folio_put(last_folio, false);
1617                                 folio_batch_release(&fbatch);
1618                                 return ERR_PTR(-EIO);
1619                         }
1620
1621                         if (!IS_DNODE(&folio->page) || !is_cold_node(&folio->page))
1622                                 continue;
1623                         if (ino_of_node(&folio->page) != ino)
1624                                 continue;
1625
1626                         folio_lock(folio);
1627
1628                         if (unlikely(!is_node_folio(folio))) {
1629 continue_unlock:
1630                                 folio_unlock(folio);
1631                                 continue;
1632                         }
1633                         if (ino_of_node(&folio->page) != ino)
1634                                 goto continue_unlock;
1635
1636                         if (!folio_test_dirty(folio)) {
1637                                 /* someone wrote it for us */
1638                                 goto continue_unlock;
1639                         }
1640
1641                         if (last_folio)
1642                                 f2fs_folio_put(last_folio, false);
1643
1644                         folio_get(folio);
1645                         last_folio = folio;
1646                         folio_unlock(folio);
1647                 }
1648                 folio_batch_release(&fbatch);
1649                 cond_resched();
1650         }
1651         return last_folio;
1652 }
1653
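/*
 * Write back one dirty node folio. Returns true when the folio was
 * consumed (written, or dropped because the node was truncated or a
 * checkpoint error occurred), and false when it was redirtied; the
 * callers below treat false as a failed pass (-EIO or -EAGAIN).
 */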
1654 static bool __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
1655                                 struct writeback_control *wbc, bool do_balance,
1656                                 enum iostat_type io_type, unsigned int *seq_id)
1657 {
1658         struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
1659         nid_t nid;
1660         struct node_info ni;
1661         struct f2fs_io_info fio = {
1662                 .sbi = sbi,
1663                 .ino = ino_of_node(&folio->page),
1664                 .type = NODE,
1665                 .op = REQ_OP_WRITE,
1666                 .op_flags = wbc_to_write_flags(wbc),
1667                 .page = &folio->page,
1668                 .encrypted_page = NULL,
1669                 .submitted = 0,
1670                 .io_type = io_type,
1671                 .io_wbc = wbc,
1672         };
1673         unsigned int seq;
1674
1675         trace_f2fs_writepage(folio, NODE);
1676
1677         if (unlikely(f2fs_cp_error(sbi))) {
1678                 /* keep node pages in remount-ro mode */
1679                 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
1680                         goto redirty_out;
1681                 folio_clear_uptodate(folio);
1682                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1683                 folio_unlock(folio);
1684                 return true;
1685         }
1686
1687         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1688                 goto redirty_out;
1689
1690         if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1691                         wbc->sync_mode == WB_SYNC_NONE &&
1692                         IS_DNODE(&folio->page) && is_cold_node(&folio->page))
1693                 goto redirty_out;
1694
1695         /* get old block addr of this node page */
1696         nid = nid_of_node(&folio->page);
1697         f2fs_bug_on(sbi, folio->index != nid);
1698
1699         if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
1700                 goto redirty_out;
1701
1702         f2fs_down_read(&sbi->node_write);
1703
1704         /* This node folio has already been truncated */
1705         if (unlikely(ni.blk_addr == NULL_ADDR)) {
1706                 folio_clear_uptodate(folio);
1707                 dec_page_count(sbi, F2FS_DIRTY_NODES);
1708                 f2fs_up_read(&sbi->node_write);
1709                 folio_unlock(folio);
1710                 return true;
1711         }
1712
1713         if (__is_valid_data_blkaddr(ni.blk_addr) &&
1714                 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
1715                                         DATA_GENERIC_ENHANCE)) {
1716                 f2fs_up_read(&sbi->node_write);
1717                 goto redirty_out;
1718         }
1719
1720         if (atomic && !test_opt(sbi, NOBARRIER))
1721                 fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1722
1723         /* should add to global list before clearing PAGECACHE status */
1724         if (f2fs_in_warm_node_list(sbi, folio)) {
1725                 seq = f2fs_add_fsync_node_entry(sbi, folio);
1726                 if (seq_id)
1727                         *seq_id = seq;
1728         }
1729
1730         folio_start_writeback(folio);
1731
1732         fio.old_blkaddr = ni.blk_addr;
1733         f2fs_do_write_node_page(nid, &fio);
1734         set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(&folio->page));
1735         dec_page_count(sbi, F2FS_DIRTY_NODES);
1736         f2fs_up_read(&sbi->node_write);
1737
1738         folio_unlock(folio);
1739
1740         if (unlikely(f2fs_cp_error(sbi))) {
1741                 f2fs_submit_merged_write(sbi, NODE);
1742                 submitted = NULL;
1743         }
1744         if (submitted)
1745                 *submitted = fio.submitted;
1746
1747         if (do_balance)
1748                 f2fs_balance_fs(sbi, false);
1749         return true;
1750
1751 redirty_out:
1752         folio_redirty_for_writepage(wbc, folio);
1753         folio_unlock(folio);
1754         return false;
1755 }
1756
1757 int f2fs_move_node_folio(struct folio *node_folio, int gc_type)
1758 {
1759         int err = 0;
1760
1761         if (gc_type == FG_GC) {
1762                 struct writeback_control wbc = {
1763                         .sync_mode = WB_SYNC_ALL,
1764                         .nr_to_write = 1,
1765                 };
1766
1767                 f2fs_folio_wait_writeback(node_folio, NODE, true, true);
1768
1769                 folio_mark_dirty(node_folio);
1770
1771                 if (!folio_clear_dirty_for_io(node_folio)) {
1772                         err = -EAGAIN;
1773                         goto out_page;
1774                 }
1775
1776                 if (!__write_node_folio(node_folio, false, NULL,
1777                                         &wbc, false, FS_GC_NODE_IO, NULL))
1778                         err = -EAGAIN;
1779                 goto release_page;
1780         } else {
1781                 /* mark the folio dirty; writeback will flush it later */
1782                 if (!folio_test_writeback(node_folio))
1783                         folio_mark_dirty(node_folio);
1784         }
1785 out_page:
1786         folio_unlock(node_folio);
1787 release_page:
1788         f2fs_folio_put(node_folio, false);
1789         return err;
1790 }
1791
1792 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1793                         struct writeback_control *wbc, bool atomic,
1794                         unsigned int *seq_id)
1795 {
1796         pgoff_t index;
1797         struct folio_batch fbatch;
1798         int ret = 0;
1799         struct folio *last_folio = NULL;
1800         bool marked = false;
1801         nid_t ino = inode->i_ino;
1802         int nr_folios;
1803         int nwritten = 0;
1804
1805         if (atomic) {
1806                 last_folio = last_fsync_dnode(sbi, ino);
1807                 if (IS_ERR_OR_NULL(last_folio))
1808                         return PTR_ERR_OR_ZERO(last_folio);
1809         }
1810 retry:
1811         folio_batch_init(&fbatch);
1812         index = 0;
1813
1814         while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1815                                         (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1816                                         &fbatch))) {
1817                 int i;
1818
1819                 for (i = 0; i < nr_folios; i++) {
1820                         struct folio *folio = fbatch.folios[i];
1821                         bool submitted = false;
1822
1823                         if (unlikely(f2fs_cp_error(sbi))) {
1824                                 f2fs_folio_put(last_folio, false);
1825                                 folio_batch_release(&fbatch);
1826                                 ret = -EIO;
1827                                 goto out;
1828                         }
1829
1830                         if (!IS_DNODE(&folio->page) || !is_cold_node(&folio->page))
1831                                 continue;
1832                         if (ino_of_node(&folio->page) != ino)
1833                                 continue;
1834
1835                         folio_lock(folio);
1836
1837                         if (unlikely(!is_node_folio(folio))) {
1838 continue_unlock:
1839                                 folio_unlock(folio);
1840                                 continue;
1841                         }
1842                         if (ino_of_node(&folio->page) != ino)
1843                                 goto continue_unlock;
1844
1845                         if (!folio_test_dirty(folio) && folio != last_folio) {
1846                                 /* someone wrote it for us */
1847                                 goto continue_unlock;
1848                         }
1849
1850                         f2fs_folio_wait_writeback(folio, NODE, true, true);
1851
1852                         set_fsync_mark(&folio->page, 0);
1853                         set_dentry_mark(&folio->page, 0);
1854
1855                         if (!atomic || folio == last_folio) {
1856                                 set_fsync_mark(&folio->page, 1);
1857                                 percpu_counter_inc(&sbi->rf_node_block_count);
1858                                 if (IS_INODE(&folio->page)) {
1859                                         if (is_inode_flag_set(inode,
1860                                                                 FI_DIRTY_INODE))
1861                                                 f2fs_update_inode(inode, folio);
1862                                         set_dentry_mark(&folio->page,
1863                                                 f2fs_need_dentry_mark(sbi, ino));
1864                                 }
1865                                 /* may have been written by another thread */
1866                                 if (!folio_test_dirty(folio))
1867                                         folio_mark_dirty(folio);
1868                         }
1869
1870                         if (!folio_clear_dirty_for_io(folio))
1871                                 goto continue_unlock;
1872
1873                         if (!__write_node_folio(folio, atomic &&
1874                                                 folio == last_folio,
1875                                                 &submitted, wbc, true,
1876                                                 FS_NODE_IO, seq_id)) {
1877                                 f2fs_folio_put(last_folio, false);
1878                                 folio_batch_release(&fbatch);
1879                                 ret = -EIO;
1880                                 goto out;
1881                         }
1882                         if (submitted)
1883                                 nwritten++;
1884
1885                         if (folio == last_folio) {
1886                                 f2fs_folio_put(folio, false);
1887                                 folio_batch_release(&fbatch);
1888                                 marked = true;
1889                                 goto out;
1890                         }
1891                 }
1892                 folio_batch_release(&fbatch);
1893                 cond_resched();
1894         }
1895         if (atomic && !marked) {
1896                 f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1897                            ino, last_folio->index);
1898                 folio_lock(last_folio);
1899                 f2fs_folio_wait_writeback(last_folio, NODE, true, true);
1900                 folio_mark_dirty(last_folio);
1901                 folio_unlock(last_folio);
1902                 goto retry;
1903         }
1904 out:
1905         if (nwritten)
1906                 f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1907         return ret;
1908 }
1909
1910 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1911 {
1912         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1913         bool clean;
1914
1915         if (inode->i_ino != ino)
1916                 return 0;
1917
1918         if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1919                 return 0;
1920
1921         spin_lock(&sbi->inode_lock[DIRTY_META]);
1922         clean = list_empty(&F2FS_I(inode)->gdirty_list);
1923         spin_unlock(&sbi->inode_lock[DIRTY_META]);
1924
1925         if (clean)
1926                 return 0;
1927
1928         inode = igrab(inode);
1929         if (!inode)
1930                 return 0;
1931         return 1;
1932 }
1933
1934 static bool flush_dirty_inode(struct folio *folio)
1935 {
1936         struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
1937         struct inode *inode;
1938         nid_t ino = ino_of_node(&folio->page);
1939
1940         inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1941         if (!inode)
1942                 return false;
1943
1944         f2fs_update_inode(inode, folio);
1945         folio_unlock(folio);
1946
1947         iput(inode);
1948         return true;
1949 }
1950
1951 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
1952 {
1953         pgoff_t index = 0;
1954         struct folio_batch fbatch;
1955         int nr_folios;
1956
1957         folio_batch_init(&fbatch);
1958
1959         while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1960                                         (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1961                                         &fbatch))) {
1962                 int i;
1963
1964                 for (i = 0; i < nr_folios; i++) {
1965                         struct folio *folio = fbatch.folios[i];
1966
1967                         if (!IS_INODE(&folio->page))
1968                                 continue;
1969
1970                         folio_lock(folio);
1971
1972                         if (unlikely(!is_node_folio(folio)))
1973                                 goto unlock;
1974                         if (!folio_test_dirty(folio))
1975                                 goto unlock;
1976
1977                         /* flush inline_data if in async context */
1978                         if (page_private_inline(&folio->page)) {
1979                                 clear_page_private_inline(&folio->page);
1980                                 folio_unlock(folio);
1981                                 flush_inline_data(sbi, ino_of_node(&folio->page));
1982                                 continue;
1983                         }
1984 unlock:
1985                         folio_unlock(folio);
1986                 }
1987                 folio_batch_release(&fbatch);
1988                 cond_resched();
1989         }
1990 }
1991
1992 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1993                                 struct writeback_control *wbc,
1994                                 bool do_balance, enum iostat_type io_type)
1995 {
1996         pgoff_t index;
1997         struct folio_batch fbatch;
1998         int step = 0;
1999         int nwritten = 0;
2000         int ret = 0;
2001         int nr_folios, done = 0;
2002
2003         folio_batch_init(&fbatch);
2004
2005 next_step:
2006         index = 0;
2007
2008         while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
2009                                 &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
2010                                 &fbatch))) {
2011                 int i;
2012
2013                 for (i = 0; i < nr_folios; i++) {
2014                         struct folio *folio = fbatch.folios[i];
2015                         bool submitted = false;
2016
2017                         /* give priority to WB_SYNC threads */
2018                         if (atomic_read(&sbi->wb_sync_req[NODE]) &&
2019                                         wbc->sync_mode == WB_SYNC_NONE) {
2020                                 done = 1;
2021                                 break;
2022                         }
2023
2024                         /*
2025                          * flushing sequence with step:
2026                          * 0. indirect nodes
2027                          * 1. dentry dnodes
2028                          * 2. file dnodes
2029                          */
2030                         if (step == 0 && IS_DNODE(&folio->page))
2031                                 continue;
2032                         if (step == 1 && (!IS_DNODE(&folio->page) ||
2033                                                 is_cold_node(&folio->page)))
2034                                 continue;
2035                         if (step == 2 && (!IS_DNODE(&folio->page) ||
2036                                                 !is_cold_node(&folio->page)))
2037                                 continue;
2038 lock_node:
2039                         if (wbc->sync_mode == WB_SYNC_ALL)
2040                                 folio_lock(folio);
2041                         else if (!folio_trylock(folio))
2042                                 continue;
2043
2044                         if (unlikely(!is_node_folio(folio))) {
2045 continue_unlock:
2046                                 folio_unlock(folio);
2047                                 continue;
2048                         }
2049
2050                         if (!folio_test_dirty(folio)) {
2051                                 /* someone wrote it for us */
2052                                 goto continue_unlock;
2053                         }
2054
2055                         /* flush inline_data/inode if in async context */
2056                         if (!do_balance)
2057                                 goto write_node;
2058
2059                         /* flush inline_data */
2060                         if (page_private_inline(&folio->page)) {
2061                                 clear_page_private_inline(&folio->page);
2062                                 folio_unlock(folio);
2063                                 flush_inline_data(sbi, ino_of_node(&folio->page));
2064                                 goto lock_node;
2065                         }
2066
2067                         /* flush dirty inode */
2068                         if (IS_INODE(&folio->page) && flush_dirty_inode(folio))
2069                                 goto lock_node;
2070 write_node:
2071                         f2fs_folio_wait_writeback(folio, NODE, true, true);
2072
2073                         if (!folio_clear_dirty_for_io(folio))
2074                                 goto continue_unlock;
2075
2076                         set_fsync_mark(&folio->page, 0);
2077                         set_dentry_mark(&folio->page, 0);
2078
2079                         if (!__write_node_folio(folio, false, &submitted,
2080                                         wbc, do_balance, io_type, NULL)) {
2081                                 folio_batch_release(&fbatch);
2082                                 ret = -EIO;
2083                                 goto out;
2084                         }
2085                         if (submitted)
2086                                 nwritten++;
2087
2088                         if (--wbc->nr_to_write == 0)
2089                                 break;
2090                 }
2091                 folio_batch_release(&fbatch);
2092                 cond_resched();
2093
2094                 if (wbc->nr_to_write == 0) {
2095                         step = 2;
2096                         break;
2097                 }
2098         }
2099
2100         if (step < 2) {
2101                 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2102                                 wbc->sync_mode == WB_SYNC_NONE && step == 1)
2103                         goto out;
2104                 step++;
2105                 goto next_step;
2106         }
2107 out:
2108         if (nwritten)
2109                 f2fs_submit_merged_write(sbi, NODE);
2110
2111         if (unlikely(f2fs_cp_error(sbi)))
2112                 return -EIO;
2113         return ret;
2114 }
2115
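/*
 * Wait for writeback of the node folios that __write_node_folio() queued
 * on sbi->fsync_node_list, up to and including @seq_id, then return any
 * error recorded on the node mapping.
 */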
2116 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2117                                                 unsigned int seq_id)
2118 {
2119         struct fsync_node_entry *fn;
2120         struct list_head *head = &sbi->fsync_node_list;
2121         unsigned long flags;
2122         unsigned int cur_seq_id = 0;
2123
2124         while (seq_id && cur_seq_id < seq_id) {
2125                 struct folio *folio;
2126
2127                 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2128                 if (list_empty(head)) {
2129                         spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2130                         break;
2131                 }
2132                 fn = list_first_entry(head, struct fsync_node_entry, list);
2133                 if (fn->seq_id > seq_id) {
2134                         spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2135                         break;
2136                 }
2137                 cur_seq_id = fn->seq_id;
2138                 folio = fn->folio;
2139                 folio_get(folio);
2140                 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2141
2142                 f2fs_folio_wait_writeback(folio, NODE, true, false);
2143
2144                 folio_put(folio);
2145         }
2146
2147         return filemap_check_errors(NODE_MAPPING(sbi));
2148 }
2149
2150 static int f2fs_write_node_pages(struct address_space *mapping,
2151                             struct writeback_control *wbc)
2152 {
2153         struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2154         struct blk_plug plug;
2155         long diff;
2156
2157         if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2158                 goto skip_write;
2159
2160         /* balancing f2fs's metadata in background */
2161         f2fs_balance_fs_bg(sbi, true);
2162
2163         /* collect a number of dirty node pages and write them together */
2164         if (wbc->sync_mode != WB_SYNC_ALL &&
2165                         get_pages(sbi, F2FS_DIRTY_NODES) <
2166                                         nr_pages_to_skip(sbi, NODE))
2167                 goto skip_write;
2168
2169         if (wbc->sync_mode == WB_SYNC_ALL)
2170                 atomic_inc(&sbi->wb_sync_req[NODE]);
2171         else if (atomic_read(&sbi->wb_sync_req[NODE])) {
2172                 /* to avoid potential deadlock */
2173                 if (current->plug)
2174                         blk_finish_plug(current->plug);
2175                 goto skip_write;
2176         }
2177
2178         trace_f2fs_writepages(mapping->host, wbc, NODE);
2179
2180         diff = nr_pages_to_write(sbi, NODE, wbc);
2181         blk_start_plug(&plug);
2182         f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2183         blk_finish_plug(&plug);
2184         wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2185
2186         if (wbc->sync_mode == WB_SYNC_ALL)
2187                 atomic_dec(&sbi->wb_sync_req[NODE]);
2188         return 0;
2189
2190 skip_write:
2191         wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2192         trace_f2fs_writepages(mapping->host, wbc, NODE);
2193         return 0;
2194 }
2195
2196 static bool f2fs_dirty_node_folio(struct address_space *mapping,
2197                 struct folio *folio)
2198 {
2199         trace_f2fs_set_page_dirty(folio, NODE);
2200
2201         if (!folio_test_uptodate(folio))
2202                 folio_mark_uptodate(folio);
2203 #ifdef CONFIG_F2FS_CHECK_FS
2204         if (IS_INODE(&folio->page))
2205                 f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
2206 #endif
2207         if (filemap_dirty_folio(mapping, folio)) {
2208                 inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
2209                 set_page_private_reference(&folio->page);
2210                 return true;
2211         }
2212         return false;
2213 }
2214
2215 /*
2216  * Structure of the f2fs node operations
2217  */
2218 const struct address_space_operations f2fs_node_aops = {
2219         .writepages     = f2fs_write_node_pages,
2220         .dirty_folio    = f2fs_dirty_node_folio,
2221         .invalidate_folio = f2fs_invalidate_folio,
2222         .release_folio  = f2fs_release_folio,
2223         .migrate_folio  = filemap_migrate_folio,
2224 };
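
/*
 * Note that no ->read_folio is wired up here: node reads bypass the
 * generic page-cache read path and go through read_node_folio() above.
 */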
2225
2226 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2227                                                 nid_t n)
2228 {
2229         return radix_tree_lookup(&nm_i->free_nid_root, n);
2230 }
2231
2232 static int __insert_free_nid(struct f2fs_sb_info *sbi,
2233                                 struct free_nid *i)
2234 {
2235         struct f2fs_nm_info *nm_i = NM_I(sbi);
2236         int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2237
2238         if (err)
2239                 return err;
2240
2241         nm_i->nid_cnt[FREE_NID]++;
2242         list_add_tail(&i->list, &nm_i->free_nid_list);
2243         return 0;
2244 }
2245
2246 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2247                         struct free_nid *i, enum nid_state state)
2248 {
2249         struct f2fs_nm_info *nm_i = NM_I(sbi);
2250
2251         f2fs_bug_on(sbi, state != i->state);
2252         nm_i->nid_cnt[state]--;
2253         if (state == FREE_NID)
2254                 list_del(&i->list);
2255         radix_tree_delete(&nm_i->free_nid_root, i->nid);
2256 }
2257
2258 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2259                         enum nid_state org_state, enum nid_state dst_state)
2260 {
2261         struct f2fs_nm_info *nm_i = NM_I(sbi);
2262
2263         f2fs_bug_on(sbi, org_state != i->state);
2264         i->state = dst_state;
2265         nm_i->nid_cnt[org_state]--;
2266         nm_i->nid_cnt[dst_state]++;
2267
2268         switch (dst_state) {
2269         case PREALLOC_NID:
2270                 list_del(&i->list);
2271                 break;
2272         case FREE_NID:
2273                 list_add_tail(&i->list, &nm_i->free_nid_list);
2274                 break;
2275         default:
2276                 BUG_ON(1);
2277         }
2278 }
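
/*
 * List membership follows the state: FREE_NID entries sit on
 * nm_i->free_nid_list, while PREALLOC_NID entries remain only in the
 * free_nid_root radix tree until f2fs_alloc_nid_done() or
 * f2fs_alloc_nid_failed() retires them (see below).
 */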
2279
2280 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2281                                                         bool set, bool build)
2282 {
2283         struct f2fs_nm_info *nm_i = NM_I(sbi);
2284         unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2285         unsigned int nid_ofs = nid - START_NID(nid);
2286
2287         if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2288                 return;
2289
2290         if (set) {
2291                 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2292                         return;
2293                 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2294                 nm_i->free_nid_count[nat_ofs]++;
2295         } else {
2296                 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2297                         return;
2298                 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2299                 if (!build)
2300                         nm_i->free_nid_count[nat_ofs]--;
2301         }
2302 }
2303
2304 /* returns true if the nid is recognized as free */
2305 static bool add_free_nid(struct f2fs_sb_info *sbi,
2306                                 nid_t nid, bool build, bool update)
2307 {
2308         struct f2fs_nm_info *nm_i = NM_I(sbi);
2309         struct free_nid *i, *e;
2310         struct nat_entry *ne;
2311         int err;
2312         bool ret = false;
2313
2314         /* 0 nid should not be used */
2315         if (unlikely(nid == 0))
2316                 return false;
2317
2318         if (unlikely(f2fs_check_nid_range(sbi, nid)))
2319                 return false;
2320
2321         i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2322         i->nid = nid;
2323         i->state = FREE_NID;
2324
2325         err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2326         f2fs_bug_on(sbi, err);
2327
2328         err = -EINVAL;
2329
2330         spin_lock(&nm_i->nid_list_lock);
2331
2332         if (build) {
2333                 /*
2334                  *   Thread A             Thread B
2335                  *  - f2fs_create
2336                  *   - f2fs_new_inode
2337                  *    - f2fs_alloc_nid
2338                  *     - __insert_nid_to_list(PREALLOC_NID)
2339                  *                     - f2fs_balance_fs_bg
2340                  *                      - f2fs_build_free_nids
2341                  *                       - __f2fs_build_free_nids
2342                  *                        - scan_nat_page
2343                  *                         - add_free_nid
2344                  *                          - __lookup_nat_cache
2345                  *  - f2fs_add_link
2346                  *   - f2fs_init_inode_metadata
2347                  *    - f2fs_new_inode_folio
2348                  *     - f2fs_new_node_folio
2349                  *      - set_node_addr
2350                  *  - f2fs_alloc_nid_done
2351                  *   - __remove_nid_from_list(PREALLOC_NID)
2352                  *                         - __insert_nid_to_list(FREE_NID)
2353                  */
2354                 ne = __lookup_nat_cache(nm_i, nid);
2355                 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2356                                 nat_get_blkaddr(ne) != NULL_ADDR))
2357                         goto err_out;
2358
2359                 e = __lookup_free_nid_list(nm_i, nid);
2360                 if (e) {
2361                         if (e->state == FREE_NID)
2362                                 ret = true;
2363                         goto err_out;
2364                 }
2365         }
2366         ret = true;
2367         err = __insert_free_nid(sbi, i);
2368 err_out:
2369         if (update) {
2370                 update_free_nid_bitmap(sbi, nid, ret, build);
2371                 if (!build)
2372                         nm_i->available_nids++;
2373         }
2374         spin_unlock(&nm_i->nid_list_lock);
2375         radix_tree_preload_end();
2376
2377         if (err)
2378                 kmem_cache_free(free_nid_slab, i);
2379         return ret;
2380 }
2381
2382 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2383 {
2384         struct f2fs_nm_info *nm_i = NM_I(sbi);
2385         struct free_nid *i;
2386         bool need_free = false;
2387
2388         spin_lock(&nm_i->nid_list_lock);
2389         i = __lookup_free_nid_list(nm_i, nid);
2390         if (i && i->state == FREE_NID) {
2391                 __remove_free_nid(sbi, i, FREE_NID);
2392                 need_free = true;
2393         }
2394         spin_unlock(&nm_i->nid_list_lock);
2395
2396         if (need_free)
2397                 kmem_cache_free(free_nid_slab, i);
2398 }
2399
2400 static int scan_nat_page(struct f2fs_sb_info *sbi,
2401                         struct f2fs_nat_block *nat_blk, nid_t start_nid)
2402 {
2403         struct f2fs_nm_info *nm_i = NM_I(sbi);
2404         block_t blk_addr;
2405         unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2406         int i;
2407
2408         __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2409
2410         i = start_nid % NAT_ENTRY_PER_BLOCK;
2411
2412         for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2413                 if (unlikely(start_nid >= nm_i->max_nid))
2414                         break;
2415
2416                 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2417
2418                 if (blk_addr == NEW_ADDR)
2419                         return -EFSCORRUPTED;
2420
2421                 if (blk_addr == NULL_ADDR) {
2422                         add_free_nid(sbi, start_nid, true, true);
2423                 } else {
2424                         spin_lock(&NM_I(sbi)->nid_list_lock);
2425                         update_free_nid_bitmap(sbi, start_nid, false, true);
2426                         spin_unlock(&NM_I(sbi)->nid_list_lock);
2427                 }
2428         }
2429
2430         return 0;
2431 }
2432
2433 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2434 {
2435         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2436         struct f2fs_journal *journal = curseg->journal;
2437         int i;
2438
2439         down_read(&curseg->journal_rwsem);
2440         for (i = 0; i < nats_in_cursum(journal); i++) {
2441                 block_t addr;
2442                 nid_t nid;
2443
2444                 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2445                 nid = le32_to_cpu(nid_in_journal(journal, i));
2446                 if (addr == NULL_ADDR)
2447                         add_free_nid(sbi, nid, true, false);
2448                 else
2449                         remove_free_nid(sbi, nid);
2450         }
2451         up_read(&curseg->journal_rwsem);
2452 }
2453
2454 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2455 {
2456         struct f2fs_nm_info *nm_i = NM_I(sbi);
2457         unsigned int i, idx;
2458         nid_t nid;
2459
2460         f2fs_down_read(&nm_i->nat_tree_lock);
2461
2462         for (i = 0; i < nm_i->nat_blocks; i++) {
2463                 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2464                         continue;
2465                 if (!nm_i->free_nid_count[i])
2466                         continue;
2467                 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2468                         idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2469                                                 NAT_ENTRY_PER_BLOCK, idx);
2470                         if (idx >= NAT_ENTRY_PER_BLOCK)
2471                                 break;
2472
2473                         nid = i * NAT_ENTRY_PER_BLOCK + idx;
2474                         add_free_nid(sbi, nid, true, false);
2475
2476                         if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2477                                 goto out;
2478                 }
2479         }
2480 out:
2481         scan_curseg_cache(sbi);
2482
2483         f2fs_up_read(&nm_i->nat_tree_lock);
2484 }
2485
2486 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2487                                                 bool sync, bool mount)
2488 {
2489         struct f2fs_nm_info *nm_i = NM_I(sbi);
2490         int i = 0, ret;
2491         nid_t nid = nm_i->next_scan_nid;
2492
2493         if (unlikely(nid >= nm_i->max_nid))
2494                 nid = 0;
2495
2496         if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2497                 nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2498
2499         /* Enough entries */
2500         if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2501                 return 0;
2502
2503         if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2504                 return 0;
2505
2506         if (!mount) {
2507                 /* try to find free nids in free_nid_bitmap */
2508                 scan_free_nid_bits(sbi);
2509
2510                 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2511                         return 0;
2512         }
2513
2514         /* readahead nat pages to be scanned */
2515         f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2516                                                         META_NAT, true);
2517
2518         f2fs_down_read(&nm_i->nat_tree_lock);
2519
2520         while (1) {
2521                 if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2522                                                 nm_i->nat_block_bitmap)) {
2523                         struct folio *folio = get_current_nat_folio(sbi, nid);
2524
2525                         if (IS_ERR(folio)) {
2526                                 ret = PTR_ERR(folio);
2527                         } else {
2528                                 ret = scan_nat_page(sbi, folio_address(folio),
2529                                                 nid);
2530                                 f2fs_folio_put(folio, true);
2531                         }
2532
2533                         if (ret) {
2534                                 f2fs_up_read(&nm_i->nat_tree_lock);
2535
2536                                 if (ret == -EFSCORRUPTED) {
2537                                         f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2538                                         set_sbi_flag(sbi, SBI_NEED_FSCK);
2539                                         f2fs_handle_error(sbi,
2540                                                 ERROR_INCONSISTENT_NAT);
2541                                 }
2542
2543                                 return ret;
2544                         }
2545                 }
2546
2547                 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2548                 if (unlikely(nid >= nm_i->max_nid))
2549                         nid = 0;
2550
2551                 if (++i >= FREE_NID_PAGES)
2552                         break;
2553         }
2554
2555         /* resume the next scan from the following NAT pages to find more free nids */
2556         nm_i->next_scan_nid = nid;
2557
2558         /* find free nids from current sum_pages */
2559         scan_curseg_cache(sbi);
2560
2561         f2fs_up_read(&nm_i->nat_tree_lock);
2562
2563         f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2564                                         nm_i->ra_nid_pages, META_NAT, false);
2565
2566         return 0;
2567 }
2568
2569 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2570 {
2571         int ret;
2572
2573         mutex_lock(&NM_I(sbi)->build_lock);
2574         ret = __f2fs_build_free_nids(sbi, sync, mount);
2575         mutex_unlock(&NM_I(sbi)->build_lock);
2576
2577         return ret;
2578 }
2579
2580 /*
2581  * If this function returns true, the caller can obtain a new nid
2582  * through the second parameter.
2583  * The returned nid can be used as an ino as well as a nid when an inode is created.
2584  */
2585 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2586 {
2587         struct f2fs_nm_info *nm_i = NM_I(sbi);
2588         struct free_nid *i = NULL;
2589 retry:
2590         if (time_to_inject(sbi, FAULT_ALLOC_NID))
2591                 return false;
2592
2593         spin_lock(&nm_i->nid_list_lock);
2594
2595         if (unlikely(nm_i->available_nids == 0)) {
2596                 spin_unlock(&nm_i->nid_list_lock);
2597                 return false;
2598         }
2599
2600         /* We should not use stale free nids created by f2fs_build_free_nids */
2601         if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2602                 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2603                 i = list_first_entry(&nm_i->free_nid_list,
2604                                         struct free_nid, list);
2605                 *nid = i->nid;
2606
2607                 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2608                 nm_i->available_nids--;
2609
2610                 update_free_nid_bitmap(sbi, *nid, false, false);
2611
2612                 spin_unlock(&nm_i->nid_list_lock);
2613                 return true;
2614         }
2615         spin_unlock(&nm_i->nid_list_lock);
2616
2617         /* Let's scan NAT pages and their caches to get free nids */
2618         if (!f2fs_build_free_nids(sbi, true, false))
2619                 goto retry;
2620         return false;
2621 }
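
/*
 * Typical allocation pattern (a sketch; f2fs_recover_xattr_data() below
 * follows it):
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	... build the new node with nid ...
 *	f2fs_alloc_nid_done(sbi, nid);
 *
 * or, if building the node failed:
 *
 *	f2fs_alloc_nid_failed(sbi, nid);
 */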
2622
2623 /*
2624  * f2fs_alloc_nid() should be called prior to this function.
2625  */
2626 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2627 {
2628         struct f2fs_nm_info *nm_i = NM_I(sbi);
2629         struct free_nid *i;
2630
2631         spin_lock(&nm_i->nid_list_lock);
2632         i = __lookup_free_nid_list(nm_i, nid);
2633         f2fs_bug_on(sbi, !i);
2634         __remove_free_nid(sbi, i, PREALLOC_NID);
2635         spin_unlock(&nm_i->nid_list_lock);
2636
2637         kmem_cache_free(free_nid_slab, i);
2638 }
2639
2640 /*
2641  * f2fs_alloc_nid() should be called prior to this function.
2642  */
2643 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2644 {
2645         struct f2fs_nm_info *nm_i = NM_I(sbi);
2646         struct free_nid *i;
2647         bool need_free = false;
2648
2649         if (!nid)
2650                 return;
2651
2652         spin_lock(&nm_i->nid_list_lock);
2653         i = __lookup_free_nid_list(nm_i, nid);
2654         f2fs_bug_on(sbi, !i);
2655
2656         if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2657                 __remove_free_nid(sbi, i, PREALLOC_NID);
2658                 need_free = true;
2659         } else {
2660                 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2661         }
2662
2663         nm_i->available_nids++;
2664
2665         update_free_nid_bitmap(sbi, nid, true, false);
2666
2667         spin_unlock(&nm_i->nid_list_lock);
2668
2669         if (need_free)
2670                 kmem_cache_free(free_nid_slab, i);
2671 }
2672
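/*
 * Trim the FREE_NID list down toward MAX_FREE_NIDS in batches of
 * SHRINK_NID_BATCH_SIZE. Returns the number of free nids actually
 * released.
 */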
2673 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2674 {
2675         struct f2fs_nm_info *nm_i = NM_I(sbi);
2676         int nr = nr_shrink;
2677
2678         if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2679                 return 0;
2680
2681         if (!mutex_trylock(&nm_i->build_lock))
2682                 return 0;
2683
2684         while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2685                 struct free_nid *i, *next;
2686                 unsigned int batch = SHRINK_NID_BATCH_SIZE;
2687
2688                 spin_lock(&nm_i->nid_list_lock);
2689                 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2690                         if (!nr_shrink || !batch ||
2691                                 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2692                                 break;
2693                         __remove_free_nid(sbi, i, FREE_NID);
2694                         kmem_cache_free(free_nid_slab, i);
2695                         nr_shrink--;
2696                         batch--;
2697                 }
2698                 spin_unlock(&nm_i->nid_list_lock);
2699         }
2700
2701         mutex_unlock(&nm_i->build_lock);
2702
2703         return nr - nr_shrink;
2704 }
2705
2706 int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio)
2707 {
2708         void *src_addr, *dst_addr;
2709         size_t inline_size;
2710         struct folio *ifolio;
2711         struct f2fs_inode *ri;
2712
2713         ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
2714         if (IS_ERR(ifolio))
2715                 return PTR_ERR(ifolio);
2716
2717         ri = F2FS_INODE(&folio->page);
2718         if (ri->i_inline & F2FS_INLINE_XATTR) {
2719                 if (!f2fs_has_inline_xattr(inode)) {
2720                         set_inode_flag(inode, FI_INLINE_XATTR);
2721                         stat_inc_inline_xattr(inode);
2722                 }
2723         } else {
2724                 if (f2fs_has_inline_xattr(inode)) {
2725                         stat_dec_inline_xattr(inode);
2726                         clear_inode_flag(inode, FI_INLINE_XATTR);
2727                 }
2728                 goto update_inode;
2729         }
2730
2731         dst_addr = inline_xattr_addr(inode, ifolio);
2732         src_addr = inline_xattr_addr(inode, folio);
2733         inline_size = inline_xattr_size(inode);
2734
2735         f2fs_folio_wait_writeback(ifolio, NODE, true, true);
2736         memcpy(dst_addr, src_addr, inline_size);
2737 update_inode:
2738         f2fs_update_inode(inode, ifolio);
2739         f2fs_folio_put(ifolio, true);
2740         return 0;
2741 }
2742
2743 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2744 {
2745         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2746         nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2747         nid_t new_xnid;
2748         struct dnode_of_data dn;
2749         struct node_info ni;
2750         struct folio *xfolio;
2751         int err;
2752
2753         if (!prev_xnid)
2754                 goto recover_xnid;
2755
2756         /* 1: invalidate the previous xattr nid */
2757         err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
2758         if (err)
2759                 return err;
2760
2761         f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
2762         dec_valid_node_count(sbi, inode, false);
2763         set_node_addr(sbi, &ni, NULL_ADDR, false);
2764
2765 recover_xnid:
2766         /* 2: update xattr nid in inode */
2767         if (!f2fs_alloc_nid(sbi, &new_xnid))
2768                 return -ENOSPC;
2769
2770         set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2771         xfolio = f2fs_new_node_folio(&dn, XATTR_NODE_OFFSET);
2772         if (IS_ERR(xfolio)) {
2773                 f2fs_alloc_nid_failed(sbi, new_xnid);
2774                 return PTR_ERR(xfolio);
2775         }
2776
2777         f2fs_alloc_nid_done(sbi, new_xnid);
2778         f2fs_update_inode_page(inode);
2779
2780         /* 3: update and set xattr node page dirty */
2781         if (page) {
2782                 memcpy(F2FS_NODE(&xfolio->page), F2FS_NODE(page),
2783                                 VALID_XATTR_BLOCK_SIZE);
2784                 folio_mark_dirty(xfolio);
2785         }
2786         f2fs_folio_put(xfolio, true);
2787
2788         return 0;
2789 }
2790
2791 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2792 {
2793         struct f2fs_inode *src, *dst;
2794         nid_t ino = ino_of_node(page);
2795         struct node_info old_ni, new_ni;
2796         struct folio *ifolio;
2797         int err;
2798
2799         err = f2fs_get_node_info(sbi, ino, &old_ni, false);
2800         if (err)
2801                 return err;
2802
2803         if (unlikely(old_ni.blk_addr != NULL_ADDR))
2804                 return -EINVAL;
2805 retry:
2806         ifolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), ino, false);
2807         if (IS_ERR(ifolio)) {
2808                 memalloc_retry_wait(GFP_NOFS);
2809                 goto retry;
2810         }
2811
2812         /* this ino must not be handed out from the free nid list again */
2813         remove_free_nid(sbi, ino);
2814
2815         if (!folio_test_uptodate(ifolio))
2816                 folio_mark_uptodate(ifolio);
2817         fill_node_footer(&ifolio->page, ino, ino, 0, true);
2818         set_cold_node(&ifolio->page, false);
2819
2820         src = F2FS_INODE(page);
2821         dst = F2FS_INODE(&ifolio->page);
2822
2823         memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2824         dst->i_size = 0;
2825         dst->i_blocks = cpu_to_le64(1);
2826         dst->i_links = cpu_to_le32(1);
2827         dst->i_xattr_nid = 0;
2828         dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2829         if (dst->i_inline & F2FS_EXTRA_ATTR) {
2830                 dst->i_extra_isize = src->i_extra_isize;
2831
2832                 if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2833                         F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2834                                                         i_inline_xattr_size))
2835                         dst->i_inline_xattr_size = src->i_inline_xattr_size;
2836
2837                 if (f2fs_sb_has_project_quota(sbi) &&
2838                         F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2839                                                                 i_projid))
2840                         dst->i_projid = src->i_projid;
2841
2842                 if (f2fs_sb_has_inode_crtime(sbi) &&
2843                         F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2844                                                         i_crtime_nsec)) {
2845                         dst->i_crtime = src->i_crtime;
2846                         dst->i_crtime_nsec = src->i_crtime_nsec;
2847                 }
2848         }
2849
2850         new_ni = old_ni;
2851         new_ni.ino = ino;
2852
2853         if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2854                 WARN_ON(1);
2855         set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2856         inc_valid_inode_count(sbi);
2857         folio_mark_dirty(ifolio);
2858         f2fs_folio_put(ifolio, true);
2859         return 0;
2860 }
2861
2862 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2863                         unsigned int segno, struct f2fs_summary_block *sum)
2864 {
2865         struct f2fs_node *rn;
2866         struct f2fs_summary *sum_entry;
2867         block_t addr;
2868         int i, idx, last_offset, nrpages;
2869
2870         /* scan the node segment */
2871         last_offset = BLKS_PER_SEG(sbi);
2872         addr = START_BLOCK(sbi, segno);
2873         sum_entry = &sum->entries[0];
2874
2875         for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2876                 nrpages = bio_max_segs(last_offset - i);
2877
2878                 /* readahead node pages */
2879                 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2880
2881                 for (idx = addr; idx < addr + nrpages; idx++) {
2882                         struct folio *folio = f2fs_get_tmp_folio(sbi, idx);
2883
2884                         if (IS_ERR(folio))
2885                                 return PTR_ERR(folio);
2886
2887                         rn = F2FS_NODE(&folio->page);
2888                         sum_entry->nid = rn->footer.nid;
2889                         sum_entry->version = 0;
2890                         sum_entry->ofs_in_node = 0;
2891                         sum_entry++;
2892                         f2fs_folio_put(folio, true);
2893                 }
2894
2895                 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2896                                                         addr + nrpages);
2897         }
2898         return 0;
2899 }
2900
2901 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2902 {
2903         struct f2fs_nm_info *nm_i = NM_I(sbi);
2904         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2905         struct f2fs_journal *journal = curseg->journal;
2906         int i;
2907
2908         down_write(&curseg->journal_rwsem);
2909         for (i = 0; i < nats_in_cursum(journal); i++) {
2910                 struct nat_entry *ne;
2911                 struct f2fs_nat_entry raw_ne;
2912                 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2913
2914                 if (f2fs_check_nid_range(sbi, nid))
2915                         continue;
2916
2917                 raw_ne = nat_in_journal(journal, i);
2918
2919                 ne = __lookup_nat_cache(nm_i, nid);
2920                 if (!ne) {
2921                         ne = __alloc_nat_entry(sbi, nid, true);
2922                         __init_nat_entry(nm_i, ne, &raw_ne, true);
2923                 }
2924
2925                 /*
2926                  * if a free nat in the journal has not been used since the
2927                  * last checkpoint, remove it from the available nids, since
2928                  * we will add it again later.
2929                  */
2930                 if (!get_nat_flag(ne, IS_DIRTY) &&
2931                                 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2932                         spin_lock(&nm_i->nid_list_lock);
2933                         nm_i->available_nids--;
2934                         spin_unlock(&nm_i->nid_list_lock);
2935                 }
2936
2937                 __set_nat_cache_dirty(nm_i, ne);
2938         }
2939         update_nats_in_cursum(journal, -i);
2940         up_write(&curseg->journal_rwsem);
2941 }
2942
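/*
 * Insert @nes into @head, kept sorted by ascending entry_cnt so that the
 * smallest sets are flushed first and get the best chance of fitting in
 * the journal; a set with at least @max entries can never fit and goes
 * straight to the tail.
 */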
2943 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2944                                                 struct list_head *head, int max)
2945 {
2946         struct nat_entry_set *cur;
2947
2948         if (nes->entry_cnt >= max)
2949                 goto add_out;
2950
2951         list_for_each_entry(cur, head, set_list) {
2952                 if (cur->entry_cnt >= nes->entry_cnt) {
2953                         list_add(&nes->set_list, cur->set_list.prev);
2954                         return;
2955                 }
2956         }
2957 add_out:
2958         list_add_tail(&nes->set_list, head);
2959 }
2960
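/*
 * Recompute the two nat_bits flags for the NAT block that was just
 * rewritten: empty_nat_bits when every entry in it is unallocated,
 * full_nat_bits when every entry is in use.  Block 0 special-cases
 * entry 0 as valid, since nid 0 is reserved and never allocated.
 */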
2961 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2962                                                 struct page *page)
2963 {
2964         struct f2fs_nm_info *nm_i = NM_I(sbi);
2965         unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2966         struct f2fs_nat_block *nat_blk = page_address(page);
2967         int valid = 0;
2968         int i = 0;
2969
2970         if (!enabled_nat_bits(sbi, NULL))
2971                 return;
2972
2973         if (nat_index == 0) {
2974                 valid = 1;
2975                 i = 1;
2976         }
2977         for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2978                 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2979                         valid++;
2980         }
2981         if (valid == 0) {
2982                 __set_bit_le(nat_index, nm_i->empty_nat_bits);
2983                 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2984                 return;
2985         }
2986
2987         __clear_bit_le(nat_index, nm_i->empty_nat_bits);
2988         if (valid == NAT_ENTRY_PER_BLOCK)
2989                 __set_bit_le(nat_index, nm_i->full_nat_bits);
2990         else
2991                 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2992 }
2993
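/*
 * Write one dirty NAT set back to disk, either into the journal (when
 * the set fits and nat_bits is not in use) or into its NAT page.  Any
 * entry whose block address dropped to NULL_ADDR is handed back to the
 * free nid pool on the way out.
 */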
2994 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2995                 struct nat_entry_set *set, struct cp_control *cpc)
2996 {
2997         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2998         struct f2fs_journal *journal = curseg->journal;
2999         nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
3000         bool to_journal = true;
3001         struct f2fs_nat_block *nat_blk;
3002         struct nat_entry *ne, *cur;
3003         struct page *page = NULL;
3004
3005         /*
3006          * There are two ways to flush nat entries:
3007          * #1, flush them to the journal in the current hot data summary block.
3008          * #2, flush them to the NAT page.
3009          */
3010         if (enabled_nat_bits(sbi, cpc) ||
3011                 !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
3012                 to_journal = false;
3013
3014         if (to_journal) {
3015                 down_write(&curseg->journal_rwsem);
3016         } else {
3017                 page = get_next_nat_page(sbi, start_nid);
3018                 if (IS_ERR(page))
3019                         return PTR_ERR(page);
3020
3021                 nat_blk = page_address(page);
3022                 f2fs_bug_on(sbi, !nat_blk);
3023         }
3024
3025         /* flush dirty nats in nat entry set */
3026         list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
3027                 struct f2fs_nat_entry *raw_ne;
3028                 nid_t nid = nat_get_nid(ne);
3029                 int offset;
3030
3031                 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
3032
3033                 if (to_journal) {
3034                         offset = f2fs_lookup_journal_in_cursum(journal,
3035                                                         NAT_JOURNAL, nid, 1);
3036                         f2fs_bug_on(sbi, offset < 0);
3037                         raw_ne = &nat_in_journal(journal, offset);
3038                         nid_in_journal(journal, offset) = cpu_to_le32(nid);
3039                 } else {
3040                         raw_ne = &nat_blk->entries[nid - start_nid];
3041                 }
3042                 raw_nat_from_node_info(raw_ne, &ne->ni);
3043                 nat_reset_flag(ne);
3044                 __clear_nat_cache_dirty(NM_I(sbi), set, ne);
3045                 if (nat_get_blkaddr(ne) == NULL_ADDR) {
3046                         add_free_nid(sbi, nid, false, true);
3047                 } else {
3048                         spin_lock(&NM_I(sbi)->nid_list_lock);
3049                         update_free_nid_bitmap(sbi, nid, false, false);
3050                         spin_unlock(&NM_I(sbi)->nid_list_lock);
3051                 }
3052         }
3053
3054         if (to_journal) {
3055                 up_write(&curseg->journal_rwsem);
3056         } else {
3057                 __update_nat_bits(sbi, start_nid, page);
3058                 f2fs_put_page(page, 1);
3059         }
3060
3061         /* Allow dirty nats by node block allocation in write_begin */
3062         if (!set->entry_cnt) {
3063                 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
3064                 kmem_cache_free(nat_entry_set_slab, set);
3065         }
3066         return 0;
3067 }
3068
3069 /*
3070  * Flush all dirty NAT entries at checkpoint time, to the journal or to NAT pages.
3071  */
3072 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3073 {
3074         struct f2fs_nm_info *nm_i = NM_I(sbi);
3075         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3076         struct f2fs_journal *journal = curseg->journal;
3077         struct nat_entry_set *setvec[NAT_VEC_SIZE];
3078         struct nat_entry_set *set, *tmp;
3079         unsigned int found;
3080         nid_t set_idx = 0;
3081         LIST_HEAD(sets);
3082         int err = 0;
3083
3084         /*
3085          * during unmount, let's flush nat_bits before checking
3086          * nat_cnt[DIRTY_NAT].
3087          */
3088         if (enabled_nat_bits(sbi, cpc)) {
3089                 f2fs_down_write(&nm_i->nat_tree_lock);
3090                 remove_nats_in_journal(sbi);
3091                 f2fs_up_write(&nm_i->nat_tree_lock);
3092         }
3093
3094         if (!nm_i->nat_cnt[DIRTY_NAT])
3095                 return 0;
3096
3097         f2fs_down_write(&nm_i->nat_tree_lock);
3098
3099         /*
3100          * If there is not enough space in the journal to store all dirty
3101          * nat entries, remove them from the journal and merge them into
3102          * the nat entry set.
3103          */
3104         if (enabled_nat_bits(sbi, cpc) ||
3105                 !__has_cursum_space(journal,
3106                         nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3107                 remove_nats_in_journal(sbi);
3108
3109         while ((found = __gang_lookup_nat_set(nm_i,
3110                                         set_idx, NAT_VEC_SIZE, setvec))) {
3111                 unsigned idx;
3112
3113                 set_idx = setvec[found - 1]->set + 1;
3114                 for (idx = 0; idx < found; idx++)
3115                         __adjust_nat_entry_set(setvec[idx], &sets,
3116                                                 MAX_NAT_JENTRIES(journal));
3117         }
3118
3119         /* flush dirty nats in nat entry set */
3120         list_for_each_entry_safe(set, tmp, &sets, set_list) {
3121                 err = __flush_nat_entry_set(sbi, set, cpc);
3122                 if (err)
3123                         break;
3124         }
3125
3126         f2fs_up_write(&nm_i->nat_tree_lock);
3127         /* Allow dirty nats by node block allocation in write_begin */
3128
3129         return err;
3130 }
3131
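/*
 * Read back the nat_bits area that the previous checkpoint stored in the
 * tail blocks of its segment.  The layout consumed below is an 8-byte
 * verifier (checkpoint version in the low 32 bits, CRC in the high 32)
 * followed by full_nat_bits and then empty_nat_bits, each nat_blocks / 8
 * bytes long.  A verifier mismatch means the bits are stale, so nat_bits
 * is disabled rather than trusted.
 */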
3132 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
3133 {
3134         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3135         struct f2fs_nm_info *nm_i = NM_I(sbi);
3136         unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3137         unsigned int i;
3138         __u64 cp_ver = cur_cp_version(ckpt);
3139         block_t nat_bits_addr;
3140
3141         if (!enabled_nat_bits(sbi, NULL))
3142                 return 0;
3143
3144         nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3145         nm_i->nat_bits = f2fs_kvzalloc(sbi,
3146                         F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
3147         if (!nm_i->nat_bits)
3148                 return -ENOMEM;
3149
3150         nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
3151                                                 nm_i->nat_bits_blocks;
3152         for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3153                 struct folio *folio;
3154
3155                 folio = f2fs_get_meta_folio(sbi, nat_bits_addr++);
3156                 if (IS_ERR(folio))
3157                         return PTR_ERR(folio);
3158
3159                 memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
3160                                         folio_address(folio), F2FS_BLKSIZE);
3161                 f2fs_folio_put(folio, true);
3162         }
3163
3164         cp_ver |= (cur_cp_crc(ckpt) << 32);
3165         if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3166                 disable_nat_bits(sbi, true);
3167                 return 0;
3168         }
3169
3170         nm_i->full_nat_bits = nm_i->nat_bits + 8;
3171         nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
3172
3173         f2fs_notice(sbi, "Found nat_bits in checkpoint");
3174         return 0;
3175 }
3176
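/*
 * Prime the free nid bitmaps from nat_bits: a NAT block flagged empty
 * gets all of its nids marked free without touching the disk, and both
 * empty and full blocks are marked up to date in nat_block_bitmap so
 * the free nid scanner can skip reading them.
 */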
3177 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
3178 {
3179         struct f2fs_nm_info *nm_i = NM_I(sbi);
3180         unsigned int i = 0;
3181         nid_t nid, last_nid;
3182
3183         if (!enabled_nat_bits(sbi, NULL))
3184                 return;
3185
3186         for (i = 0; i < nm_i->nat_blocks; i++) {
3187                 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3188                 if (i >= nm_i->nat_blocks)
3189                         break;
3190
3191                 __set_bit_le(i, nm_i->nat_block_bitmap);
3192
3193                 nid = i * NAT_ENTRY_PER_BLOCK;
3194                 last_nid = nid + NAT_ENTRY_PER_BLOCK;
3195
3196                 spin_lock(&NM_I(sbi)->nid_list_lock);
3197                 for (; nid < last_nid; nid++)
3198                         update_free_nid_bitmap(sbi, nid, true, true);
3199                 spin_unlock(&NM_I(sbi)->nid_list_lock);
3200         }
3201
3202         for (i = 0; i < nm_i->nat_blocks; i++) {
3203                 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3204                 if (i >= nm_i->nat_blocks)
3205                         break;
3206
3207                 __set_bit_le(i, nm_i->nat_block_bitmap);
3208         }
3209 }
3210
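/*
 * Sizing below derives everything from the raw superblock.  As a worked
 * example (assuming default 4 KiB blocks, so 455 NAT entries per block,
 * and 512 blocks per segment): segment_count_nat = 2 describes one NAT
 * segment pair, giving nat_blocks = 1 << 9 = 512 and
 * max_nid = 455 * 512 = 232960 addressable node ids.
 */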
3211 static int init_node_manager(struct f2fs_sb_info *sbi)
3212 {
3213         struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
3214         struct f2fs_nm_info *nm_i = NM_I(sbi);
3215         unsigned char *version_bitmap;
3216         unsigned int nat_segs;
3217         int err;
3218
3219         nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3220
3221         /* segment_count_nat includes the pair segment, so divide by 2. */
3222         nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3223         nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3224         nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3225
3226         /* unused nids: 0, node, meta (and root, which is counted as a valid node) */
3227         nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3228                                                 F2FS_RESERVED_NODE_NUM;
3229         nm_i->nid_cnt[FREE_NID] = 0;
3230         nm_i->nid_cnt[PREALLOC_NID] = 0;
3231         nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3232         nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3233         nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3234         nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;
3235
3236         INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3237         INIT_LIST_HEAD(&nm_i->free_nid_list);
3238         INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3239         INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3240         INIT_LIST_HEAD(&nm_i->nat_entries);
3241         spin_lock_init(&nm_i->nat_list_lock);
3242
3243         mutex_init(&nm_i->build_lock);
3244         spin_lock_init(&nm_i->nid_list_lock);
3245         init_f2fs_rwsem(&nm_i->nat_tree_lock);
3246
3247         nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3248         nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3249         version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
3250         nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3251                                         GFP_KERNEL);
3252         if (!nm_i->nat_bitmap)
3253                 return -ENOMEM;
3254
3255         if (!test_opt(sbi, NAT_BITS))
3256                 disable_nat_bits(sbi, true);
3257
3258         err = __get_nat_bitmaps(sbi);
3259         if (err)
3260                 return err;
3261
3262 #ifdef CONFIG_F2FS_CHECK_FS
3263         nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3264                                         GFP_KERNEL);
3265         if (!nm_i->nat_bitmap_mir)
3266                 return -ENOMEM;
3267 #endif
3268
3269         return 0;
3270 }
3271
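/*
 * The free nid cache is sized by nat_blocks: one bitmap of
 * NAT_ENTRY_PER_BLOCK bits per NAT block (free_nid_bitmap), one bit per
 * NAT block saying whether that bitmap is up to date (nat_block_bitmap),
 * and one free nid counter per NAT block (free_nid_count).  All of it
 * is allocated zeroed, i.e. "nothing known free yet".
 */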
3272 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3273 {
3274         struct f2fs_nm_info *nm_i = NM_I(sbi);
3275         int i;
3276
3277         nm_i->free_nid_bitmap =
3278                 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
3279                                               nm_i->nat_blocks),
3280                               GFP_KERNEL);
3281         if (!nm_i->free_nid_bitmap)
3282                 return -ENOMEM;
3283
3284         for (i = 0; i < nm_i->nat_blocks; i++) {
3285                 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3286                         f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3287                 if (!nm_i->free_nid_bitmap[i])
3288                         return -ENOMEM;
3289         }
3290
3291         nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3292                                                                 GFP_KERNEL);
3293         if (!nm_i->nat_block_bitmap)
3294                 return -ENOMEM;
3295
3296         nm_i->free_nid_count =
3297                 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3298                                               nm_i->nat_blocks),
3299                               GFP_KERNEL);
3300         if (!nm_i->free_nid_count)
3301                 return -ENOMEM;
3302         return 0;
3303 }
3304
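/*
 * Mount-time bring-up, in dependency order: the node manager core first,
 * then the free nid cache it indexes, then the nat_bits fast path, and
 * finally a synchronous scan to preload free nids for early allocations.
 */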
3305 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3306 {
3307         int err;
3308
3309         sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3310                                                         GFP_KERNEL);
3311         if (!sbi->nm_info)
3312                 return -ENOMEM;
3313
3314         err = init_node_manager(sbi);
3315         if (err)
3316                 return err;
3317
3318         err = init_free_nid_cache(sbi);
3319         if (err)
3320                 return err;
3321
3322         /* load free nid status from nat_bits table */
3323         load_free_nid_bitmap(sbi);
3324
3325         return f2fs_build_free_nids(sbi, true, true);
3326 }
3327
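/*
 * Unmount-time teardown in reverse: drain the free nid list, then the
 * NAT entry cache and the dirty set cache via gang lookups on their
 * radix trees, then free every bitmap.  The f2fs_bug_on() checks assert
 * that nothing was left allocated or dirty.
 */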
3328 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3329 {
3330         struct f2fs_nm_info *nm_i = NM_I(sbi);
3331         struct free_nid *i, *next_i;
3332         void *vec[NAT_VEC_SIZE];
3333         struct nat_entry **natvec = (struct nat_entry **)vec;
3334         struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
3335         nid_t nid = 0;
3336         unsigned int found;
3337
3338         if (!nm_i)
3339                 return;
3340
3341         /* destroy free nid list */
3342         spin_lock(&nm_i->nid_list_lock);
3343         list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3344                 __remove_free_nid(sbi, i, FREE_NID);
3345                 spin_unlock(&nm_i->nid_list_lock);
3346                 kmem_cache_free(free_nid_slab, i);
3347                 spin_lock(&nm_i->nid_list_lock);
3348         }
3349         f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3350         f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3351         f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3352         spin_unlock(&nm_i->nid_list_lock);
3353
3354         /* destroy nat cache */
3355         f2fs_down_write(&nm_i->nat_tree_lock);
3356         while ((found = __gang_lookup_nat_cache(nm_i,
3357                                         nid, NAT_VEC_SIZE, natvec))) {
3358                 unsigned idx;
3359
3360                 nid = nat_get_nid(natvec[found - 1]) + 1;
3361                 for (idx = 0; idx < found; idx++) {
3362                         spin_lock(&nm_i->nat_list_lock);
3363                         list_del(&natvec[idx]->list);
3364                         spin_unlock(&nm_i->nat_list_lock);
3365
3366                         __del_from_nat_cache(nm_i, natvec[idx]);
3367                 }
3368         }
3369         f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3370
3371         /* destroy nat set cache */
3372         nid = 0;
3373         memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
3374         while ((found = __gang_lookup_nat_set(nm_i,
3375                                         nid, NAT_VEC_SIZE, setvec))) {
3376                 unsigned idx;
3377
3378                 nid = setvec[found - 1]->set + 1;
3379                 for (idx = 0; idx < found; idx++) {
3380                         /* entry_cnt is not zero when a cp_error has occurred */
3381                         f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3382                         radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3383                         kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3384                 }
3385         }
3386         f2fs_up_write(&nm_i->nat_tree_lock);
3387
3388         kvfree(nm_i->nat_block_bitmap);
3389         if (nm_i->free_nid_bitmap) {
3390                 int i;
3391
3392                 for (i = 0; i < nm_i->nat_blocks; i++)
3393                         kvfree(nm_i->free_nid_bitmap[i]);
3394                 kvfree(nm_i->free_nid_bitmap);
3395         }
3396         kvfree(nm_i->free_nid_count);
3397
3398         kvfree(nm_i->nat_bitmap);
3399         kvfree(nm_i->nat_bits);
3400 #ifdef CONFIG_F2FS_CHECK_FS
3401         kvfree(nm_i->nat_bitmap_mir);
3402 #endif
3403         sbi->nm_info = NULL;
3404         kfree(nm_i);
3405 }
3406
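/*
 * Slab creation uses the usual unwind-on-failure ladder: each cache that
 * was created before a failure is destroyed by the matching label below,
 * and the whole init fails with -ENOMEM.
 */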
3407 int __init f2fs_create_node_manager_caches(void)
3408 {
3409         nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3410                         sizeof(struct nat_entry));
3411         if (!nat_entry_slab)
3412                 goto fail;
3413
3414         free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3415                         sizeof(struct free_nid));
3416         if (!free_nid_slab)
3417                 goto destroy_nat_entry;
3418
3419         nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3420                         sizeof(struct nat_entry_set));
3421         if (!nat_entry_set_slab)
3422                 goto destroy_free_nid;
3423
3424         fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
3425                         sizeof(struct fsync_node_entry));
3426         if (!fsync_node_entry_slab)
3427                 goto destroy_nat_entry_set;
3428         return 0;
3429
3430 destroy_nat_entry_set:
3431         kmem_cache_destroy(nat_entry_set_slab);
3432 destroy_free_nid:
3433         kmem_cache_destroy(free_nid_slab);
3434 destroy_nat_entry:
3435         kmem_cache_destroy(nat_entry_slab);
3436 fail:
3437         return -ENOMEM;
3438 }
3439
3440 void f2fs_destroy_node_manager_caches(void)
3441 {
3442         kmem_cache_destroy(fsync_node_entry_slab);
3443         kmem_cache_destroy(nat_entry_set_slab);
3444         kmem_cache_destroy(free_nid_slab);
3445         kmem_cache_destroy(nat_entry_slab);
3446 }