f2fs: introduce DEFAULT_IO_TIMEOUT
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *ino_entry_slab;
struct kmem_cache *f2fs_inode_entry_slab;

void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
	f2fs_build_fault_attr(sbi, 0, 0);
	set_ckpt_flags(sbi, CP_ERROR_FLAG);
	if (!end_io)
		f2fs_flush_merged_writes(sbi);
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page = NULL;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META, true, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	return page;
}

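/*
 * Added commentary: read one meta page synchronously through the meta
 * inode's page cache. When @is_meta is false the caller is reading
 * blocks for power-off recovery (POR), so REQ_META is dropped and the
 * block address is validated as META_POR rather than META_GENERIC.
 */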
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = REQ_META | REQ_PRIO,
		.old_blkaddr = index,
		.new_blkaddr = index,
		.encrypted_page = NULL,
		.is_por = !is_meta,
	};
	int err;

	if (unlikely(!is_meta))
		fio.op_flags &= ~REQ_META;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	fio.page = page;

	err = f2fs_submit_page_bio(&fio);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
out:
	return page;
}

struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}

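/*
 * Added commentary: unlike f2fs_get_meta_page(), this variant retries a
 * transient -EIO up to DEFAULT_RETRY_IO_COUNT times; if the read still
 * fails, the checkpoint is stopped because callers cannot make progress
 * without this page.
 */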
struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	int count = 0;

retry:
	page = __get_meta_page(sbi, index, true);
	if (IS_ERR(page)) {
		if (PTR_ERR(page) == -EIO &&
				++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false);
	}
	return page;
}

/* for POR only */
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}

static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
							int type)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	bool exist;

	if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ)
		return true;

	segno = GET_SEGNO(sbi, blkaddr);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);

	exist = f2fs_test_bit(offset, se->cur_valid_map);
	if (!exist && type == DATA_GENERIC_ENHANCE) {
		f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
			 blkaddr, exist);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		WARN_ON(1);
	}
	return exist;
}

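/*
 * Added commentary: the range checks below mirror the on-disk layout,
 * which (assuming the standard f2fs layout) is ordered as
 *	superblock | checkpoint | SIT | NAT | SSA | main area
 * so each meta type is validated against the bounds of its own region.
 */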
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		break;
	case META_SIT:
		if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
			return false;
		break;
	case META_SSA:
		if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
			blkaddr < SM_I(sbi)->ssa_blkaddr))
			return false;
		break;
	case META_CP:
		if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
			blkaddr < __start_cp_addr(sbi)))
			return false;
		break;
	case META_POR:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
			blkaddr < MAIN_BLKADDR(sbi)))
			return false;
		break;
	case DATA_GENERIC:
	case DATA_GENERIC_ENHANCE:
	case DATA_GENERIC_ENHANCE_READ:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
				blkaddr < MAIN_BLKADDR(sbi))) {
			f2fs_warn(sbi, "access invalid blkaddr:%u",
				  blkaddr);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			WARN_ON(1);
			return false;
		} else {
			return __is_bitmap_valid(sbi, blkaddr, type);
		}
		break;
	case META_GENERIC:
		if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
			blkaddr >= MAIN_BLKADDR(sbi)))
			return false;
		break;
	default:
		BUG();
	}

	return true;
}

/*
 * Readahead CP/NAT/SIT/SSA/POR pages
 */
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
		.in_list = false,
		.is_por = (type == META_POR),
	};
	struct blk_plug plug;

	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {

		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		page = f2fs_grab_cache_page(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
		f2fs_submit_page_bio(&fio);
		f2fs_put_page(page, 0);
	}
out:
	blk_finish_plug(&plug);
	return blkno - start;
}

void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	bool readahead = false;

	page = find_get_page(META_MAPPING(sbi), index);
	if (!page || !PageUptodate(page))
		readahead = true;
	f2fs_put_page(page, 0);

	if (readahead)
		f2fs_ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
}

static int __f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
		goto redirty_out;

	f2fs_do_write_meta_page(sbi, page, io_type);
	dec_page_count(sbi, F2FS_DIRTY_META);

	if (wbc->for_reclaim)
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_write(sbi, META);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	return __f2fs_write_meta_page(page, wbc, FS_META_IO);
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff, written;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* collect a number of dirty meta pages and write them together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_META) <
					nr_pages_to_skip(sbi, META))
		goto skip_write;

	/* if the lock attempt failed, cp will flush dirty pages instead */
	if (!mutex_trylock(&sbi->cp_mutex))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, META);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
	mutex_unlock(&sbi->cp_mutex);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	trace_f2fs_writepages(mapping->host, wbc, META);
	return 0;
}

long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
				long nr_to_write, enum iostat_type io_type)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, prev = ULONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	int nr_pages;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	pagevec_init(&pvec);

	blk_start_plug(&plug);

	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (prev == ULONG_MAX)
				prev = page->index - 1;
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (__f2fs_write_meta_page(page, &wbc, io_type)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_write(sbi, type);

	blk_finish_plug(&plug);

	return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, META);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
		f2fs_set_page_private(page, 0);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

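/*
 * Added commentary: the slab object is allocated and the radix tree
 * node is preloaded before taking ino_lock, so no allocation happens
 * under the spinlock; if the ino was already present, the unused
 * object is freed after the lock is dropped.
 */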
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
						unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e, *tmp;

	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		e = tmp;
		if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
			f2fs_bug_on(sbi, 1);

		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		if (type != ORPHAN_INO)
			im->ino_num++;
	}

	if (type == FLUSH_INO)
		f2fs_set_bit(devidx, (char *)&e->dirty_device);

	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	if (e != tmp)
		kmem_cache_free(ino_entry_slab, tmp);
}

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e) {
		list_del(&e->list);
		radix_tree_delete(&im->ino_root, ino);
		im->ino_num--;
		spin_unlock(&im->ino_lock);
		kmem_cache_free(ino_entry_slab, e);
		return;
	}
	spin_unlock(&im->ino_lock);
}

void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, 0, type);
}

void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO or UPDATE_INO */
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct inode_management *im = &sbi->im[mode];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	spin_unlock(&im->ino_lock);
	return e ? true : false;
}

void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}

void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	__add_ino_entry(sbi, ino, devidx, type);
}

bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;
	bool is_dirty = false;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device))
		is_dirty = true;
	spin_unlock(&im->ino_lock);
	return is_dirty;
}

int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

	if (time_to_inject(sbi, FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		f2fs_show_injection_info(sbi, FAULT_ORPHAN);
		return -ENOSPC;
	}

	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}

void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}

void f2fs_add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO);
	f2fs_update_inode_page(inode);
}

void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}

static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct node_info ni;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * it is a bug if we cannot find the inode behind an
		 * orphan entry.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	err = dquot_initialize(inode);
	if (err) {
		iput(inode);
		goto err_out;
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);

	err = f2fs_get_node_info(sbi, ino, &ni);
	if (err)
		goto err_out;

	/* ENOMEM was fully retried in f2fs_evict_inode. */
	if (ni.blk_addr != NULL_ADDR) {
		err = -EIO;
		goto err_out;
	}
	return 0;

err_out:
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.",
		  __func__, ino);
	return err;
}

int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	unsigned int s_flags = sbi->sb->s_flags;
	int err = 0;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	if (bdev_read_only(sbi->sb->s_bdev)) {
		f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
		return 0;
	}

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page;
		struct f2fs_orphan_block *orphan_blk;

		page = f2fs_get_meta_page(sbi, start_blk + i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto out;
		}

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				goto out;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
out:
	set_sbi_flag(sbi, SBI_IS_RECOVERED);

#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return err;
}

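/*
 * Added commentary: each f2fs_orphan_block carries a footer recording
 * its index within the pack (blk_addr), the total number of orphan
 * blocks (blk_count) and the entries used in this block (entry_count);
 * with 4KB blocks that works out to the 1020 entries per block noted
 * below.
 */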
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	/*
	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
	 * orphan inode operations are covered under f2fs_lock_op().
	 * And, spin_lock should be avoided due to page operations below.
	 */
	head = &im->ino_list;

	/* loop over each orphan inode entry and write them into orphan blocks */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = f2fs_grab_meta_page(sbi, start_blk++);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * once an orphan block is full of 1020 entries,
			 * we need to flush the current orphan block
			 * and bring another one into memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

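/*
 * Added commentary: when checksum_offset is smaller than
 * CP_CHKSUM_OFFSET, the checksum covers two regions and skips the hole
 * where the checksum itself is stored, so both the legacy and the
 * current checkpoint layouts can be verified with one helper.
 */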
static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi,
						struct f2fs_checkpoint *ckpt)
{
	unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset);
	__u32 chksum;

	chksum = f2fs_crc32(sbi, ckpt, chksum_ofs);
	if (chksum_ofs < CP_CHKSUM_OFFSET) {
		chksum_ofs += sizeof(chksum);
		chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs,
						F2FS_BLKSIZE - chksum_ofs);
	}
	return chksum;
}

static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
		struct f2fs_checkpoint **cp_block, struct page **cp_page,
		unsigned long long *version)
{
	size_t crc_offset = 0;
	__u32 crc;

	*cp_page = f2fs_get_meta_page(sbi, cp_addr);
	if (IS_ERR(*cp_page))
		return PTR_ERR(*cp_page);

	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);

	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
	if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
			crc_offset > CP_CHKSUM_OFFSET) {
		f2fs_put_page(*cp_page, 1);
		f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
		return -EINVAL;
	}

	crc = f2fs_checkpoint_chksum(sbi, *cp_block);
	if (crc != cur_cp_crc(*cp_block)) {
		f2fs_put_page(*cp_page, 1);
		f2fs_warn(sbi, "invalid crc value");
		return -EINVAL;
	}

	*version = cur_cp_version(*cp_block);
	return 0;
}

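/*
 * Added commentary: a checkpoint pack is accepted only if the version
 * stored in its first block matches the one in its last block; a
 * mismatch means the pack was torn by a crash mid-write and is
 * discarded.
 */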
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
	struct f2fs_checkpoint *cp_block = NULL;
	unsigned long long cur_version = 0, pre_version = 0;
	int err;

	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_1, version);
	if (err)
		return NULL;

	if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
					sbi->blocks_per_seg) {
		f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
			  le32_to_cpu(cp_block->cp_pack_total_block_count));
		goto invalid_cp;
	}
	pre_version = *version;

	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_2, version);
	if (err)
		goto invalid_cp;
	cur_version = *version;

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
	f2fs_put_page(cp_page_2, 1);
invalid_cp:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}

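/*
 * Added commentary: f2fs keeps two checkpoint packs in consecutive
 * segments and alternates between them; both are validated here and
 * the one carrying the newer version wins, which is what makes
 * checkpointing atomic across power failures.
 */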
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;
	int err;

	sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
				 GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding the valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		err = -EFSCORRUPTED;
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	if (cur_page == cp1)
		sbi->cur_cp_pack = 1;
	else
		sbi->cur_cp_pack = 2;

	/* Sanity checking of checkpoint */
	if (f2fs_sanity_check_ckpt(sbi)) {
		err = -EFSCORRUPTED;
		goto free_fail_no_cp;
	}

	if (cp_blks <= 1)
		goto done;

	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
		if (IS_ERR(cur_page)) {
			err = PTR_ERR(cur_page);
			goto free_fail_no_cp;
		}
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

free_fail_no_cp:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
fail_no_cp:
	kvfree(sbi->ckpt);
	return err;
}

static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (is_inode_flag_set(inode, flag))
		return;

	set_inode_flag(inode, flag);
	if (!f2fs_is_volatile_file(inode))
		list_add_tail(&F2FS_I(inode)->dirty_list,
						&sbi->inode_list[type]);
	stat_inc_dirty_inode(sbi, type);
}

static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
		return;

	list_del_init(&F2FS_I(inode)->dirty_list);
	clear_inode_flag(inode, flag);
	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}

void f2fs_update_dirty_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	spin_lock(&sbi->inode_lock[type]);
	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
		__add_dirty_inode(inode, type);
	inode_inc_dirty_pages(inode);
	spin_unlock(&sbi->inode_lock[type]);

	f2fs_set_page_private(page, 0);
	f2fs_trace_pid(page);
}

void f2fs_remove_dirty_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
		return;

	spin_lock(&sbi->inode_lock[type]);
	__remove_dirty_inode(inode, type);
	spin_unlock(&sbi->inode_lock[type]);
}

int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
	struct list_head *head;
	struct inode *inode;
	struct f2fs_inode_info *fi;
	bool is_dir = (type == DIR_INODE);
	unsigned long ino = 0;

	trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	spin_lock(&sbi->inode_lock[type]);

	head = &sbi->inode_list[type];
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[type]);
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return 0;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[type]);
	if (inode) {
		unsigned long cur_ino = inode->i_ino;

		F2FS_I(inode)->cp_task = current;

		filemap_fdatawrite(inode->i_mapping);

		F2FS_I(inode)->cp_task = NULL;

		iput(inode);
		/* We need to give the CPU to other writers. */
		if (ino == cur_ino)
			cond_resched();
		else
			ino = cur_ino;
	} else {
		/*
		 * We should submit the bio, since several dentry pages
		 * under writeback may still exist in the freeing inode.
		 */
		f2fs_submit_merged_write(sbi, DATA);
		cond_resched();
	}
	goto retry;
}

int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &sbi->inode_list[DIRTY_META];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);

	while (total--) {
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		if (list_empty(head)) {
			spin_unlock(&sbi->inode_lock[DIRTY_META]);
			return 0;
		}
		fi = list_first_entry(head, struct f2fs_inode_info,
							gdirty_list);
		inode = igrab(&fi->vfs_inode);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		if (inode) {
			sync_inode_metadata(inode, 0);

			/* it's on eviction */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE))
				f2fs_update_inode_page(inode);
			iput(inode);
		}
	}
	return 0;
}

static void __prepare_cp_block(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	nid_t last_nid = nm_i->next_scan_nid;

	next_free_nid(sbi, &last_nid);
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);
}

static bool __need_flush_quota(struct f2fs_sb_info *sbi)
{
	bool ret = false;

	if (!is_journalled_quota(sbi))
		return false;

	down_write(&sbi->quota_sem);
	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
		ret = false;
	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
		ret = false;
	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
		clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
		ret = true;
	} else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
		ret = true;
	}
	up_write(&sbi->quota_sem);
	return ret;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;
	int err = 0, cnt = 0;

	blk_start_plug(&plug);

retry_flush_quotas:
	f2fs_lock_all(sbi);
	if (__need_flush_quota(sbi)) {
		int locked;

		if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
			set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
			set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
			goto retry_flush_dents;
		}
		f2fs_unlock_all(sbi);

		/* the trylock can only fail during mount/umount/freeze/quotactl */
		locked = down_read_trylock(&sbi->sb->s_umount);
		f2fs_quota_sync(sbi->sb, -1);
		if (locked)
			up_read(&sbi->sb->s_umount);
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_dents:
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			goto out;
		cond_resched();
		goto retry_flush_quotas;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush. inode->i_blocks can be updated.
	 */
	down_write(&sbi->node_change);

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		up_write(&sbi->node_change);
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			goto out;
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_nodes:
	down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		atomic_inc(&sbi->wb_sync_req[NODE]);
		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
		atomic_dec(&sbi->wb_sync_req[NODE]);
		if (err) {
			up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
			goto out;
		}
		cond_resched();
		goto retry_flush_nodes;
	}

	/*
	 * sbi->node_change is used only for AIO write_begin path which produces
	 * dirty node blocks and some checkpoint values by block allocation.
	 */
	__prepare_cp_block(sbi);
	up_write(&sbi->node_change);
out:
	blk_finish_plug(&plug);
	return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->node_write);
	f2fs_unlock_all(sbi);
}

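/*
 * Added commentary: rather than sleeping unconditionally, this polls
 * the page counter and bounds each wait by DEFAULT_IO_TIMEOUT (the
 * constant this patch introduces), so a lost wakeup cannot stall the
 * checkpoint forever; a checkpoint error also breaks the loop early.
 */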
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!get_pages(sbi, type))
			break;

		if (unlikely(f2fs_cp_error(sbi)))
			break;

		io_schedule_timeout(DEFAULT_IO_TIMEOUT);
	}
	finish_wait(&sbi->cp_wait, &wait);
}

static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);

	if ((cpc->reason & CP_UMOUNT) &&
			le32_to_cpu(ckpt->cp_pack_total_block_count) >
			sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
		disable_nat_bits(sbi, false);

	if (cpc->reason & CP_TRIMMED)
		__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);

	if (cpc->reason & CP_UMOUNT)
		__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

	if (cpc->reason & CP_FASTBOOT)
		__set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

	if (orphan_num)
		__set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
		is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
		__set_ckpt_flags(ckpt, CP_FSCK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		__set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK))
		__set_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);

	/* set this flag to activate crc|cp_ver for recovery */
	__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
	__clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);

	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static void commit_checkpoint(struct f2fs_sb_info *sbi,
	void *src, block_t blk_addr)
{
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};

	/*
	 * pagevec_lookup_tag and lock_page again will take
	 * some extra time. Therefore, f2fs_update_meta_pages and
	 * f2fs_sync_meta_pages are combined in this function.
	 */
	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
	int err;

	f2fs_wait_on_page_writeback(page, META, true, true);

	memcpy(page_address(page), src, PAGE_SIZE);

	set_page_dirty(page);
	if (unlikely(!clear_page_dirty_for_io(page)))
		f2fs_bug_on(sbi, 1);

	/* writeout cp pack 2 page */
	err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
	if (unlikely(err && f2fs_cp_error(sbi))) {
		f2fs_put_page(page, 1);
		return;
	}

	f2fs_bug_on(sbi, err);
	f2fs_put_page(page, 0);

	/* submit checkpoint (with barrier if NOBARRIER is not set) */
	f2fs_submit_merged_write(sbi, META_FLUSH);
}

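/*
 * Added commentary: the blocks of a checkpoint pack are written in the
 * order
 *	cp block | cp payload | orphan blocks | data summaries |
 *	(node summaries) | cp block copy
 * everything except the trailing cp block copy goes out first; the copy
 * is committed last, behind a flush, by commit_checkpoint().
 */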
static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags;
	block_t start_blk;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	int i;
	int cp_payload_blks = __cp_payload(sbi);
	struct super_block *sb = sbi->sb;
	struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	u64 kbytes_written;
	int err;

	/* Flush all the NAT/SIT pages */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);

	/* start to update checkpoint, cp ver is already updated previously */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		ckpt->cur_data_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
		ckpt->cur_data_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
		ckpt->alloc_type[i + CURSEG_HOT_DATA] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
	}

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false);
	spin_lock_irqsave(&sbi->cp_lock, flags);
	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
		__set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);

	orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
			orphan_blocks);

	if (__remain_node_summaries(cpc->reason))
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks + NR_CURSEG_NODE_TYPE);
	else
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks);

	/* update ckpt flag for checkpoint */
	update_ckpt_flags(sbi, cpc);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_checkpoint_chksum(sbi, ckpt);
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_next_addr(sbi);

	/* write nat bits */
	if (enabled_nat_bits(sbi, cpc)) {
		__u64 cp_ver = cur_cp_version(ckpt);
		block_t blk;

		cp_ver |= ((__u64)crc32 << 32);
		*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);

		blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
		for (i = 0; i < nm_i->nat_bits_blocks; i++)
			f2fs_update_meta_page(sbi, nm_i->nat_bits +
					(i << F2FS_BLKSIZE_BITS), blk + i);
	}

	/* write out checkpoint buffer at block 0 */
	f2fs_update_meta_page(sbi, ckpt, start_blk++);

	for (i = 1; i < 1 + cp_payload_blks; i++)
		f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
							start_blk++);

	if (orphan_num) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	f2fs_write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;

	/* Record write statistics in the hot node summary */
	kbytes_written = sbi->kbytes_written;
	if (sb->s_bdev->bd_part)
		kbytes_written += BD_PART_WRITTEN(sbi);

	seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);

	if (__remain_node_summaries(cpc->reason)) {
		f2fs_write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	percpu_counter_set(&sbi->alloc_valid_block_count, 0);

	/* Here, we have one bio containing the CP pack, except the cp pack 2 page */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
	/* Wait for all dirty meta pages to be submitted for IO */
	f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);

	/* wait for previous submitted meta pages writeback */
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/* flush all device cache */
	err = f2fs_flush_device_cache(sbi);
	if (err)
		return err;

	/* barrier and flush checkpoint cp pack 2 page if it can */
	commit_checkpoint(sbi, ckpt, start_blk);
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/*
	 * invalidate intermediate page cache pages borrowed from the meta
	 * inode, which are used for migration of encrypted or verity
	 * inodes' blocks.
	 */
	if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi))
		invalidate_mapping_pages(META_MAPPING(sbi),
				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);

	f2fs_release_ino_entry(sbi, false);

	f2fs_reset_fsync_node_info(sbi);

	clear_sbi_flag(sbi, SBI_IS_DIRTY);
	clear_sbi_flag(sbi, SBI_NEED_CP);
	clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = 0;
	spin_unlock(&sbi->stat_lock);

	__set_cp_next_pack(sbi);

	/*
	 * mark sbi dirty again if metadata like node pages or the inode
	 * cache was updated while the checkpoint was being written.
	 */
	if (get_pages(sbi, F2FS_DIRTY_NODES) ||
			get_pages(sbi, F2FS_DIRTY_IMETA))
		set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));

	return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
}

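/*
 * Added commentary: the overall sequence below is
 *	block_operations() -> flush NAT/SIT -> do_checkpoint() ->
 *	unblock_operations()
 * all serialized by cp_mutex, so at most one checkpoint is in flight.
 */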
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;
	int err = 0;

	if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (cpc->reason != CP_PAUSE)
			return 0;
		f2fs_warn(sbi, "Start checkpoint disabled!");
	}
	mutex_lock(&sbi->cp_mutex);

	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
		((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
		((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
		goto out;
	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto out;
	}

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	err = block_operations(sbi);
	if (err)
		goto out;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

	f2fs_flush_merged_writes(sbi);

	/* this is the case of multiple fstrims without any changes */
	if (cpc->reason & CP_DISCARD) {
		if (!f2fs_exist_trim_candidates(sbi, cpc)) {
			unblock_operations(sbi);
			goto out;
		}

		if (NM_I(sbi)->dirty_nat_cnt == 0 &&
				SIT_I(sbi)->dirty_sentries == 0 &&
				prefree_segments(sbi) == 0) {
			f2fs_flush_sit_entries(sbi, cpc);
			f2fs_clear_prefree_segments(sbi, cpc);
			unblock_operations(sbi);
			goto out;
		}
	}

	/*
	 * update the checkpoint pack index
	 * Increase the version number so that
	 * SIT entries and seg summaries are written to the correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	err = f2fs_flush_nat_entries(sbi, cpc);
	if (err)
		goto stop;

	f2fs_flush_sit_entries(sbi, cpc);

	err = do_checkpoint(sbi, cpc);
	if (err)
		f2fs_release_discard_addrs(sbi);
	else
		f2fs_clear_prefree_segments(sbi, cpc);
stop:
	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason & CP_RECOVERY)
		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);

	/* update CP_TIME to trigger checkpoint periodically */
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
	mutex_unlock(&sbi->cp_mutex);
	return err;
}

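/*
 * Added commentary: max_orphans is derived from the blocks left in a
 * cp pack after the two cp blocks, the payload and the current segment
 * summaries; e.g. assuming 512 blocks per segment and no payload, this
 * allows (512 - 2 - 6) * 1020 = 514080 orphan entries.
 */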
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
		spin_lock_init(&im->ino_lock);
		INIT_LIST_HEAD(&im->ino_list);
		im->ino_num = 0;
	}

	sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_TYPE - __cp_payload(sbi)) *
				F2FS_ORPHANS_PER_BLOCK;
}

int __init f2fs_create_checkpoint_caches(void)
{
	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
			sizeof(struct ino_entry));
	if (!ino_entry_slab)
		return -ENOMEM;
	f2fs_inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
			sizeof(struct inode_entry));
	if (!f2fs_inode_entry_slab) {
		kmem_cache_destroy(ino_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(ino_entry_slab);
	kmem_cache_destroy(f2fs_inode_entry_slab);
}