 LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16);
 LE64_BITMASK(BCH_SB_METADATA_TARGET, struct bch_sb, flags[3], 16, 28);
 LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29);
+LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE, struct bch_sb, flags[3], 29, 30);

 /*
  * Features:
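The new field claims bit 29 of the fourth 64-bit flags word in the superblock. For readers unfamiliar with the macro: LE64_BITMASK() emits inline getter/setter pairs over a little-endian word. A minimal sketch of roughly what the one-bit BCH_SB_INODES_USE_KEY_CACHE accessors expand to (simplified; the real LE_BITMASK() machinery in bcachefs_format.h is parameterized over word size and also emits _OFFSET/_BITS constants):

static inline __u64 BCH_SB_INODES_USE_KEY_CACHE(const struct bch_sb *sb)
{
        /* extract bits 29..30 (a single bit) of flags[3] */
        return (__le64_to_cpu(sb->flags[3]) >> 29) & ~(~0ULL << (30 - 29));
}

static inline void SET_BCH_SB_INODES_USE_KEY_CACHE(struct bch_sb *sb, __u64 v)
{
        __u64 f = __le64_to_cpu(sb->flags[3]);

        f &= ~(~(~0ULL << (30 - 29)) << 29);    /* clear the old value */
        f |= (v & ~(~0ULL << (30 - 29))) << 29; /* splice in the new one */
        sb->flags[3] = __cpu_to_le64(f);
}

In bch2_inode_peek(), the key cache is now used only when the option is enabled: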
         struct bkey_s_c k;
         int ret;

-        iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum),
-                                   BTREE_ITER_CACHED|flags);
+        if (trans->c->opts.inodes_use_key_cache)
+                flags |= BTREE_ITER_CACHED;
+
+        iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum), flags);
         k = bch2_btree_iter_peek_slot(iter);
         ret = bkey_err(k);
         if (ret)
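With the check moved inside bch2_inode_peek(), callers no longer need to or in BTREE_ITER_CACHED themselves. An illustrative sketch of the usual call pattern (assuming the signature of this era, where bch2_inode_peek() returns the iterator or an ERR_PTR on failure; inum, inode_u and the err label are hypothetical caller names):

        /* look up and unpack the inode inside the current transaction */
        iter = bch2_inode_peek(&trans, &inode_u, inum, BTREE_ITER_INTENT);
        ret = PTR_ERR_OR_ZERO(iter);
        if (ret)
                goto err;

bch2_inode_rm() gets the same treatment. Its old code chose between BTREE_ITER_CACHED and BTREE_ITER_SLOTS based only on its cached argument; the iterator flags are now computed up front, and the key cache is used only when the caller asked for it and the option allows it: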
         struct bpos end = POS(inode_nr + 1, 0);
         struct bch_inode_unpacked inode_u;
         struct bkey_s_c k;
+        unsigned iter_flags = BTREE_ITER_INTENT;
         int ret;

+        if (cached && c->opts.inodes_use_key_cache)
+                iter_flags |= BTREE_ITER_CACHED;
+
         bch2_trans_init(&trans, c, 0, 1024);

         /*
 retry:
         bch2_trans_begin(&trans);

-        iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS(0, inode_nr),
-                                   (cached
-                                    ? BTREE_ITER_CACHED
-                                    : BTREE_ITER_SLOTS)|
-                                   BTREE_ITER_INTENT);
+        iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes,
+                                   POS(0, inode_nr), iter_flags);
         k = bch2_btree_iter_peek_slot(iter);
         ret = bkey_err(k);
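The c->opts.inodes_use_key_cache field consulted above lives in struct bch_opts, the per-filesystem options struct generated from the same x() macro list that the final hunk extends. Roughly (simplified from fs/bcachefs/opts.h):

struct bch_opts {
/* one "was this option explicitly set" bit per option... */
#define x(_name, _bits, ...)    unsigned _name##_defined:1;
        BCH_OPTS()
#undef x

/* ...and one field per option, typed by the list's second column */
#define x(_name, _bits, ...)    _bits _name;
        BCH_OPTS()
#undef x
};

So the single new entry below is all that is needed for the option to show up in struct bch_opts, in the superblock, and in option parsing: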
           OPT_BOOL(),                                                  \
           BCH_SB_SHARD_INUMS,          false,                          \
           NULL,        "Shard new inode numbers by CPU id")            \
+        x(inodes_use_key_cache,        u8,                             \
+          OPT_FORMAT|OPT_MOUNT,                                        \
+          OPT_BOOL(),                                                  \
+          BCH_SB_INODES_USE_KEY_CACHE, true,                           \
+          NULL,        "Use the btree key cache for the inodes btree") \
         x(gc_reserve_percent,          u8,                             \
           OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME,                            \
           OPT_UINT(5, 21),                                             \
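The default of true preserves the existing behavior: inodes keep going through the btree key cache unless the option is turned off. OPT_FORMAT|OPT_MOUNT, with no OPT_RUNTIME, means it can be chosen at format time or overridden per mount, but not flipped on a live filesystem the way gc_reserve_percent can. In-kernel code would set it like any other option; a small illustrative sketch (not part of this patch) using the opt_set() and bch2_opts_empty() helpers from opts.h:

        /* hypothetical example: build a mount options struct that
         * bypasses the key cache for the inodes btree */
        struct bch_opts opts = bch2_opts_empty();

        opt_set(opts, inodes_use_key_cache, false);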