folio->index != nid_of_node(&folio->page));
dec_page_count(sbi, type);
- if (f2fs_in_warm_node_list(sbi, &folio->page))
+ if (f2fs_in_warm_node_list(sbi, folio))
f2fs_del_fsync_node_entry(sbi, &folio->page);
clear_page_private_gcing(&folio->page);
folio_end_writeback(folio);
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi,
+ const struct folio *folio);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
start, nr);
}
-bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, const struct folio *folio)
{
- return NODE_MAPPING(sbi) == page->mapping &&
- IS_DNODE(page) && is_cold_node(page);
+ return NODE_MAPPING(sbi) == folio->mapping &&
+ IS_DNODE(&folio->page) && is_cold_node(&folio->page);
}
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
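With the predicate converted, call sites fall into two camps during the transition, as the hunks on either side show. A minimal sketch (hypothetical caller, not part of this patch): a site still holding a struct page can wrap it with page_folio(), while a converted site passes its folio straight through. f2fs_del_fsync_node_entry() has not been converted yet, so folio-native callers reach back through &folio->page.

	/* Legacy call site, sketched: page_folio() bridges to the new API. */
	if (f2fs_in_warm_node_list(sbi, page_folio(page)))
		f2fs_del_fsync_node_entry(sbi, page);

	/* Converted call site, sketched: the folio passes straight through,
	 * but the fsync-node helper still takes a page for now. */
	if (f2fs_in_warm_node_list(sbi, folio))
		f2fs_del_fsync_node_entry(sbi, &folio->page);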
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
/* should add to global list before clearing PAGECACHE status */
- if (f2fs_in_warm_node_list(sbi, page)) {
+ if (f2fs_in_warm_node_list(sbi, folio)) {
seq = f2fs_add_fsync_node_entry(sbi, page);
if (seq_id)
*seq_id = seq;
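The comment above carries the ordering constraint: f2fs_add_fsync_node_entry() returns a sequence number, and the fsync path later waits for all entries up to that number to drain, so the entry must be on the global list before writeback on the folio can complete. A sketch of the waiter side, assuming the existing f2fs_wait_on_node_pages_writeback() helper as f2fs_do_sync_file() uses it:

	/* fsync side, sketched: drain fsync-node entries up to seq_id.
	 * Were the entry added only after PG_writeback is cleared, the
	 * waiter could return before the node actually reaches disk. */
	ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
	if (ret)
		goto out;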
if (fscrypt_inode_uses_fs_layer_crypto(folio->mapping->host))
fscrypt_finalize_bounce_page(&fio->encrypted_page);
folio_end_writeback(folio);
- if (f2fs_in_warm_node_list(fio->sbi, fio->page))
+ if (f2fs_in_warm_node_list(fio->sbi, folio))
f2fs_del_fsync_node_entry(fio->sbi, fio->page);
goto out;
}
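The remaining &folio->page uses are scaffolding: once IS_DNODE() and is_cold_node() accept folios, the predicate body loses its last page escape hatch. A sketch of that end state, where the folio-taking variants of both macros are assumptions rather than something this patch provides:

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi,
			    const struct folio *folio)
{
	/* End-state sketch: IS_DNODE()/is_cold_node() taking a folio
	 * directly is assumed here, not part of this patch. */
	return NODE_MAPPING(sbi) == folio->mapping &&
		IS_DNODE(folio) && is_cold_node(folio);
}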