f2fs: add recovery routines for roll-forward
/**
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;

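/*
 * Check that the blocks allocated since the last checkpoint still fit
 * within the user-visible space, i.e. there is room left to replay the
 * roll-forward log.
 */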
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

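/* Look up an inode already staged for recovery by its inode number. */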
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
							nid_t ino)
{
	struct list_head *this;
	struct fsync_inode_entry *entry;

	list_for_each(this, head) {
		entry = list_entry(this, struct fsync_inode_entry, list);
		if (entry->inode->i_ino == ino)
			return entry;
	}
	return NULL;
}

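/*
 * Re-link a recovered inode into its parent directory.  The parent is
 * looked up through i_pino from the raw inode; if the name already
 * exists in the parent we leave it alone, otherwise the link is added.
 */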
static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
	struct f2fs_inode *raw_inode = &(raw_node->i);
	struct dentry dent, parent;
	struct f2fs_dir_entry *de;
	struct page *page;
	struct inode *dir;
	int err = 0;

	if (!is_dent_dnode(ipage))
		goto out;

	dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino));
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	parent.d_inode = dir;
	dent.d_parent = &parent;
	dent.d_name.len = le32_to_cpu(raw_inode->i_namelen);
	dent.d_name.name = raw_inode->i_name;

	de = f2fs_find_entry(dir, &dent.d_name, &page);
	if (de) {
		kunmap(page);
		f2fs_put_page(page, 0);
	} else {
		err = f2fs_add_link(&dent, inode);
	}
	iput(dir);
out:
	kunmap(ipage);
	return err;
}

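/*
 * Restore the metadata that fsync made recoverable: mode, size and
 * timestamps are taken from the raw on-disk inode, then the directory
 * entry is recovered if this node block carries one.
 */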
static int recover_inode(struct inode *inode, struct page *node_page)
{
	void *kaddr = page_address(node_page);
	struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
	struct f2fs_inode *raw_inode = &(raw_node->i);

	inode->i_mode = le32_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	return recover_dentry(node_page, inode);
}

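/*
 * Step #1 of recovery: follow the chain of warm node blocks written
 * after the last checkpoint (linked via next_blkaddr_of_node) and build
 * the list of inodes that were fsync'd.  The walk stops as soon as a
 * block from an older checkpoint version is met.
 */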
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

	/* read node page; alloc_page() returns NULL on failure, not ERR_PTR */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
			goto out;

		if (cp_ver != cpver_of_node(page))
			goto out;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			entry->blkaddr = blkaddr;
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					goto out;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				goto out;
			}

			/*
			 * Get the inode before linking the entry, so a failed
			 * iget never leaves an ERR_PTR inode on the list.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				goto out;
			}

			INIT_LIST_HEAD(&entry->list);
			list_add_tail(&entry->list, head);
			entry->blkaddr = blkaddr;
		}
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err)
				goto out;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		ClearPageUptodate(page);
	}
out:
	unlock_page(page);
	__free_pages(page, 0);
	return err;
}

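/* Release every entry left on the recovery list. */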
static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
					struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	/* entries are freed while walking, so the _safe iterator is needed */
	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

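/*
 * A data block about to be recovered may still be referenced by an
 * older node.  Find that node through the segment summary of the block
 * and punch the stale index out of the owning inode, so the block has
 * exactly one owner after recovery.
 */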
static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
					block_t blkaddr)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
					(sbi->blocks_per_seg - 1);
	struct f2fs_summary sum;
	nid_t ino;
	void *kaddr;
	struct inode *inode;
	struct page *node_page;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			break;
		}
	}
	if (i > CURSEG_COLD_DATA) {
		struct page *sum_page = get_sum_page(sbi, segno);
		struct f2fs_summary_block *sum_node;
		kaddr = page_address(sum_page);
		sum_node = (struct f2fs_summary_block *)kaddr;
		sum = sum_node->entries[blkoff];
		f2fs_put_page(sum_page, 1);
	}

	/* Get the node page */
	node_page = get_node_page(sbi, le32_to_cpu(sum.nid));
	if (IS_ERR(node_page))
		return;
	bidx = start_bidx_of_node(ofs_of_node(node_page)) +
				le16_to_cpu(sum.ofs_in_node);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget_nowait(sbi->sb, ino);
	if (IS_ERR(inode))
		return;
	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
}

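/*
 * Step #2 worker: replay one fsync'd node block.  For each index whose
 * recorded block address differs from the current one, make the inode
 * point at the fsync'd address (invalidating any stale owner first),
 * then rewrite the node page in place with the recovered footer.
 */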
static void do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;

	start = start_bidx_of_node(ofs_of_node(page));
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE;
	else
		end = start + ADDRS_PER_BLOCK;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&dn, start, 0))
		return;

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	BUG_ON(ni.ino != ino_of_node(page));
	BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				int err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				BUG_ON(err);
			}

			/* Check the previous node page having this index */
			check_index_in_prev_nodes(sbi, dest);

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
	f2fs_put_dnode(&dn);
}

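/*
 * Step #2 of recovery: walk the same node chain again and replay the
 * data of every inode collected in step #1.  An entry is released once
 * the block that last updated its inode has been replayed.
 */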
static void recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page; alloc_page() returns NULL on failure, not ERR_PTR */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		if (f2fs_readpage(sbi, page, blkaddr, READ_SYNC))
			goto out;

		if (cp_ver != cpver_of_node(page))
			goto out;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		do_recover_data(sbi, entry->inode, page, blkaddr);

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		ClearPageUptodate(page);
	}
out:
	unlock_page(page);
	__free_pages(page, 0);

	allocate_new_segments(sbi);
}

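/*
 * Entry point for roll-forward recovery: collect the fsync'd inodes
 * (step #1), replay their data (step #2), then write a checkpoint so
 * the recovered state becomes durable.
 */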
void recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	if (unlikely(!fsync_entry_slab))
		return;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	if (find_fsync_dnodes(sbi, &inode_list))
		goto out;

	if (list_empty(&inode_list))
		goto out;

	/* step #2: recover data */
	sbi->por_doing = 1;
	recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	sbi->por_doing = 0;
	BUG_ON(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(sbi, &inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	write_checkpoint(sbi, false, false);
}