#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
-static inline bool IS_INODE(struct page *page)
+static inline bool IS_INODE(const struct folio *folio)
{
- struct f2fs_node *p = F2FS_NODE(page);
+ struct f2fs_node *p = F2FS_NODE(&folio->page);
return RAW_IS_INODE(p);
}
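For reference, the check itself is unchanged: struct node_footer stores both the block's own node ID and the owning inode number, and RAW_IS_INODE() treats a node block as an inode block exactly when the two match (an inode's node block is registered under its own ino, while direct/indirect node blocks get separate nids). A minimal userspace sketch of that idea, using simplified stand-in structs rather than the real packed definitions from include/linux/f2fs_fs.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for struct node_footer / struct f2fs_node. */
struct demo_node_footer {
	uint32_t nid;	/* node ID of this block */
	uint32_t ino;	/* inode number owning this block */
};

struct demo_f2fs_node {
	struct demo_node_footer footer;
};

/* Same test as RAW_IS_INODE(): nid == ino only for an inode's own node block. */
static bool demo_is_inode(const struct demo_f2fs_node *n)
{
	return n->footer.nid == n->footer.ino;
}

int main(void)
{
	/* Hypothetical values: inode 7's own node block vs. a direct node it owns. */
	struct demo_f2fs_node inode_blk = { .footer = { .nid = 7,  .ino = 7 } };
	struct demo_f2fs_node dnode_blk = { .footer = { .nid = 42, .ino = 7 } };

	printf("inode block -> %d, direct node block -> %d\n",
	       demo_is_inode(&inode_blk), demo_is_inode(&dnode_blk));
	return 0;
}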
static inline unsigned int get_dnode_base(struct inode *inode,
struct folio *node_folio)
{
- if (!IS_INODE(&node_folio->page))
+ if (!IS_INODE(node_folio))
return 0;
return inode ? get_extra_isize(inode) :

count -= dn.ofs_in_node;
f2fs_bug_on(sbi, count < 0);
- if (dn.ofs_in_node || IS_INODE(&dn.node_folio->page)) {
+ if (dn.ofs_in_node || IS_INODE(dn.node_folio)) {
f2fs_truncate_data_blocks_range(&dn, count);
free_from += count;
}
return false;
}
- if (IS_INODE(&node_folio->page)) {
+ if (IS_INODE(node_folio)) {
base = offset_in_addr(F2FS_INODE(node_folio));
max_addrs = DEF_ADDRS_PER_INODE;
} else {

* x o -> remove data blocks, and then recover inline_data
* x x -> recover data blocks
*/
- if (IS_INODE(&nfolio->page))
+ if (IS_INODE(nfolio))
ri = F2FS_INODE(nfolio);
if (f2fs_has_inline_data(inode) &&

if (!f2fs_sb_has_inode_chksum(sbi))
return false;
- if (!IS_INODE(&folio->page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
+ if (!IS_INODE(folio) || !(ri->i_inline & F2FS_EXTRA_ATTR))
return false;
if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),

else if (IS_ERR(folio))
return PTR_ERR(folio);
- if (IS_INODE(&folio->page) || ino_of_node(folio) != dn->inode->i_ino) {
+ if (IS_INODE(folio) || ino_of_node(folio) != dn->inode->i_ino) {
f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
dn->inode->i_ino, dn->nid, ino_of_node(folio));
set_sbi_flag(sbi, SBI_NEED_FSCK);

struct folio *folio, pgoff_t nid,
enum node_type ntype)
{
- struct page *page = &folio->page;
-
if (unlikely(nid != nid_of_node(folio) ||
- (ntype == NODE_TYPE_INODE && !IS_INODE(page)) ||
+ (ntype == NODE_TYPE_INODE && !IS_INODE(folio)) ||
(ntype == NODE_TYPE_XATTR &&
!f2fs_has_xattr_block(ofs_of_node(folio))) ||
time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))) {

if (!atomic || folio == last_folio) {
set_fsync_mark(folio, 1);
percpu_counter_inc(&sbi->rf_node_block_count);
- if (IS_INODE(&folio->page)) {
+ if (IS_INODE(folio)) {
if (is_inode_flag_set(inode,
FI_DIRTY_INODE))
f2fs_update_inode(inode, folio);

for (i = 0; i < nr_folios; i++) {
struct folio *folio = fbatch.folios[i];
- if (!IS_INODE(&folio->page))
+ if (!IS_INODE(folio))
continue;
folio_lock(folio);
}
/* flush dirty inode */
- if (IS_INODE(&folio->page) && flush_dirty_inode(folio))
+ if (IS_INODE(folio) && flush_dirty_inode(folio))
goto lock_node;
write_node:
f2fs_folio_wait_writeback(folio, NODE, true, true);

if (!folio_test_uptodate(folio))
folio_mark_uptodate(folio);
#ifdef CONFIG_F2FS_CHECK_FS
- if (IS_INODE(&folio->page))
+ if (IS_INODE(folio))
f2fs_inode_chksum_set(F2FS_M_SB(mapping), folio);
#endif
if (filemap_dirty_folio(mapping, folio)) {

bool quota_inode = false;
if (!check_only &&
- IS_INODE(&folio->page) &&
+ IS_INODE(folio) &&
is_dent_dnode(folio)) {
err = f2fs_recover_inode_page(sbi, folio);
if (err) {
}
entry->blkaddr = blkaddr;
- if (IS_INODE(&folio->page) && is_dent_dnode(folio))
+ if (IS_INODE(folio) && is_dent_dnode(folio))
entry->last_dentry = blkaddr;
next:
/* check next segment */

int err = 0, recovered = 0;
/* step 1: recover xattr */
- if (IS_INODE(&folio->page)) {
+ if (IS_INODE(folio)) {
err = f2fs_recover_inline_xattr(inode, folio);
if (err)
goto out;

* In this case, we can lose the latest inode(x).
* So, call recover_inode for the inode update.
*/
- if (IS_INODE(&folio->page)) {
+ if (IS_INODE(folio)) {
err = recover_inode(entry->inode, folio);
if (err) {
f2fs_folio_put(folio, true);

/* Node IDs in an Indirect Block */
#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
-#define ADDRS_PER_PAGE(folio, inode) (addrs_per_page(inode, IS_INODE(&folio->page)))
+#define ADDRS_PER_PAGE(folio, inode) (addrs_per_page(inode, IS_INODE(folio)))
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
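For scale, assuming the default 4 KiB block size and the packed 24-byte struct node_footer (nid, ino, flag, cp_ver, next_blkaddr = 4 + 4 + 4 + 8 + 4 bytes), the first macro works out to NIDS_PER_BLOCK = (4096 - 24) / 4 = 1018 node IDs per indirect block, while ADDRS_PER_PAGE() simply selects the inode or direct-node data-address count for a node folio based on IS_INODE(folio).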