f2fs: support lower priority asynchronous readahead in ra_meta_pages

/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

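/*
 * Decide whether the given in-memory cache may keep growing: each cache
 * type is measured against nm_i->ram_thresh percent of available low
 * memory, with free nids and NAT entries allowed a quarter (>> 2) of
 * that budget and the remaining caches a half (>> 1).
 */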
bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

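/*
 * Each NAT block has two on-disk locations that checkpoints alternate
 * between. This copies the current NAT block to its paired address,
 * dirties the copy, and flips the version bit via set_to_next_nat() so
 * the next checkpoint writes to the shadow location.
 */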
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

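/*
 * Dirty nat entries are grouped into a nat_entry_set per NAT block so
 * that checkpoint can flush a whole block's worth of entries together
 * (see __flush_nat_entry_set() at the bottom of this file).
 */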
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

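/*
 * The three predicates below inspect cached NAT flags under
 * nat_tree_lock: IS_CHECKPOINTED tells whether the entry's address is
 * covered by the last checkpoint, while HAS_FSYNCED_INODE and
 * HAS_LAST_FSYNC record fsync state used by the fsync and recovery paths.
 */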
int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		node_info_from_raw_nat(&e->ni, ne);
	}
	up_write(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));

		/* in order to reuse the nid */
		if (nm_i->next_scan_nid > ni->nid)
			nm_i->next_scan_nid = ni->nid;
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

/*
 * This function always returns success
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	up_read(&nm_i->nat_tree_lock);
	if (e)
		return;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
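/*
 * Worked example, assuming the common 4KB layout where ADDRS_PER_INODE(fi)
 * is 923 (no inline xattr space reserved) and ADDRS_PER_BLOCK ==
 * NIDS_PER_BLOCK == 1018: block 2000 gives 2000 - 923 = 1077, which is
 * past the first direct node (1077 - 1018 = 59), so the path is level 1
 * with offset[] = { NODE_DIR2_BLOCK, 59 } and noffset[1] = 2.
 */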
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
				offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
				offset[n - 2] * (dptrs_per_blk + 1) +
				offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is not set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
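/*
 * A minimal caller sketch (error handling elided; allocating callers
 * also wrap this in f2fs_lock_op()/f2fs_unlock_op()):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 */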
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

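/*
 * Recursively free the subtree below an indirect (depth 2) or double
 * indirect (depth 3) node. The return value counts freed node pages; a
 * fully freed child reports NIDS_PER_BLOCK + 1, which tells the parent
 * it may clear that nid slot.
 */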
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = truncate_xattr_node(inode, dn.inode_page);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
	return 0;
}

struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should release the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.rw = rw,
		.page = page,
		.encrypted_page = NULL,
	};

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	fio.blk_addr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	f2fs_put_page(apage, err ? 1 : 0);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err != LOCKED_PAGE) {
		lock_page(page);
	}

	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		goto page_hit;
	}

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

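/*
 * Write back dirty node pages. Without a target ino this makes three
 * passes over the node mapping (indirect nodes, then dentry dnodes,
 * then file dnodes); with an ino it serves fsync and writes only that
 * inode's dnodes, tagging them with fsync/dentry marks for recovery.
 */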
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * If in fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page,
						need_dentry_mark(sbi, ino));
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);
			else
				wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	set_page_writeback(page);
	fio.blk_addr = ni.blk_addr;
	write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);
	unlock_page(page);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
							nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

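/*
 * Add a candidate nid to the free nid cache. Returns 1 if the nid was
 * added, 0 if it was skipped (zero, already allocated, or already
 * listed), and -1 if the cache is over its memory budget.
 */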
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		down_read(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		up_read(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

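/*
 * Refill the free nid cache: readahead FREE_NID_PAGES NAT blocks
 * starting at next_scan_nid, scan them for unused nids, then reconcile
 * the result against nids still sitting in the current NAT journal.
 */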
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
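/*
 * A sketch of the expected allocation protocol, mirroring the caller in
 * get_dnode_of_data() (error handling elided):
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	dn->nid = nid;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);
 *	else
 *		alloc_nid_done(sbi, nid);
 */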
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		struct node_info ni;

		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);

		/* check nid is allocated already */
		get_node_info(sbi, *nid, &ni);
		if (ni.blk_addr != NULL_ADDR) {
			alloc_nid_done(sbi, *nid);
			goto retry;
		}
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next;
	int nr = nr_shrink;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
		if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
			break;
		if (i->state == NID_ALLOC)
			continue;
		__del_from_free_nid_list(nm_i, i);
		kmem_cache_free(free_nid_slab, i);
		nm_i->fcnt--;
		nr_shrink--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}

void recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(sbi, 1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(sbi);
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* readahead node pages */
		ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = get_tmp_page(sbi, idx);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

aec71382 1827static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
e05df3b1
JK
1828{
1829 struct f2fs_nm_info *nm_i = NM_I(sbi);
1830 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1831 struct f2fs_summary_block *sum = curseg->sum_blk;
1832 int i;
1833
1834 mutex_lock(&curseg->curseg_mutex);
e05df3b1
JK
1835 for (i = 0; i < nats_in_cursum(sum); i++) {
1836 struct nat_entry *ne;
1837 struct f2fs_nat_entry raw_ne;
1838 nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
1839
1840 raw_ne = nat_in_journal(sum, i);
9be32d72 1841
8b26ef98 1842 down_write(&nm_i->nat_tree_lock);
e05df3b1 1843 ne = __lookup_nat_cache(nm_i, nid);
e05df3b1 1844 if (!ne) {
9be32d72
JK
1845 ne = grab_nat_entry(nm_i, nid);
1846 node_info_from_raw_nat(&ne->ni, &raw_ne);
e05df3b1 1847 }
e05df3b1 1848 __set_nat_cache_dirty(nm_i, ne);
8b26ef98 1849 up_write(&nm_i->nat_tree_lock);
e05df3b1
JK
1850 }
1851 update_nats_in_cursum(sum, -i);
1852 mutex_unlock(&curseg->curseg_mutex);
e05df3b1
JK
1853}
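
/*
 * Insertion-sort @nes into @head by ascending entry_cnt; a set whose
 * entry_cnt is already >= @max (too large for the remaining journal
 * space) goes straight to the tail.  For example, with max = 5, adding
 * sets of 3, 7 and 1 entries yields the order 1, 3, 7: the small sets
 * get first claim on journal slots and the oversized one falls through
 * to a NAT block write.
 */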
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}
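
/*
 * Write one dirty set back: prefer the journal while it still has room
 * for the whole set, otherwise rewrite the set's on-disk NAT block.
 * Entries that became unallocated (NULL_ADDR) are recycled into the
 * free nid list, and the emptied set is removed from the radix tree.
 */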
static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
					struct nat_entry_set *set)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		mutex_lock(&curseg->curseg_mutex);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (to_journal) {
			offset = lookup_journal_in_cursum(sum,
						NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(sum, offset);
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);

		down_write(&NM_I(sbi)->nat_tree_lock);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), ne);
		up_write(&NM_I(sbi)->nat_tree_lock);

		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false);
	}

	if (to_journal)
		mutex_unlock(&curseg->curseg_mutex);
	else
		f2fs_put_page(page, 1);

	f2fs_bug_on(sbi, set->entry_cnt);

	down_write(&nm_i->nat_tree_lock);
	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
	up_write(&nm_i->nat_tree_lock);
	kmem_cache_free(nat_entry_set_slab, set);
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;
	/*
	 * if there is not enough space in the journal to store all the
	 * dirty nat entries, remove them all from the journal and merge
	 * them into nat entry sets.
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(sum));
	}
	up_write(&nm_i->nat_tree_lock);

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
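
/*
 * Sizing example (assuming 4KB blocks, where one NAT block holds
 * NAT_ENTRY_PER_BLOCK = 455 entries): segment_count_nat = 4 with
 * log_blocks_per_seg = 9 gives nat_segs = 2 and nat_blocks = 1024,
 * hence max_nid = 455 * 1024 = 465920, minus F2FS_RESERVED_NODE_NUM
 * for available_nids.
 */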
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes the pair segment, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* unused nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}
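
/*
 * Mount-time entry point: allocate nm_info, initialize it from the
 * on-disk layout above, then pre-scan the NAT area for free nids so
 * nid allocation does not start from a cold cache.
 */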
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}
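
/*
 * Teardown mirrors the build order: drain the free nid list, drop the
 * NAT entry cache, free any leftover NAT entry sets (their entry lists
 * must be empty unless a checkpoint error occurred), then free the
 * bitmap.
 */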
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(sbi, i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
			nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero if a checkpoint error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}
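
/*
 * Slab caches backing this file's allocations: nat_entry for cached NAT
 * entries, free_nid for the free nid list, and nat_entry_set for the
 * per-NAT-block dirty sets.  Created once at module init and destroyed
 * in reverse order on failure or module exit.
 */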
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}