struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
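+ /* ACLs are stored as xattrs with an empty name; build the search key for this ACL type */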
+ struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
struct btree_trans trans;
struct btree_iter iter = { NULL };
struct bkey_s_c_xattr xattr;
bch2_trans_begin(&trans);
ret = bch2_hash_lookup(&trans, &iter, bch2_xattr_hash_desc,
- &hash, inode_inum(inode),
- &X_SEARCH(acl_to_xattr_type(type), "", 0),
- 0);
+ &hash, inode_inum(inode), &search, 0);
if (ret) {
if (!bch2_err_matches(ret, ENOENT))
acl = ERR_PTR(ret);
struct posix_acl **new_acl)
{
struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
+ struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
struct btree_iter iter;
struct bkey_s_c_xattr xattr;
struct bkey_i_xattr *new;
int ret;
ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
- &hash_info, inum,
- &X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0),
- BTREE_ITER_INTENT);
+ &hash_info, inum, &search, BTREE_ITER_INTENT);
if (ret)
return bch2_err_matches(ret, ENOENT) ? 0 : ret;
* This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
* extents style btrees, but works on non-extents btrees:
*/
-struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
return ca != NULL;
}
-struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
{
struct bch_fs *c = iter->trans->c;
struct bkey_s_c k;
return div_u64(mem_bytes >> 1, btree_bytes(c));
}
-int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
- unsigned btree_leaf_mask,
- unsigned btree_interior_mask,
- struct bbpos start, struct bbpos *end)
+static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
+ unsigned btree_leaf_mask,
+ unsigned btree_interior_mask,
+ struct bbpos start, struct bbpos *end)
{
struct btree_iter iter;
struct bkey_s_c k;
: bucket;
}
-int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
- struct bpos start, struct bpos *end)
+static int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
+ struct bpos start, struct bpos *end)
{
struct btree_iter alloc_iter;
struct btree_iter bp_iter;
struct bch_replicas_entry_v0 {
__u8 data_type;
__u8 nr_devs;
- __u8 devs[];
+ __u8 devs[0];
} __packed;
struct bch_sb_field_replicas_v0 {
struct bch_sb_field field;
- struct bch_replicas_entry_v0 entries[];
+ struct bch_replicas_entry_v0 entries[0];
} __packed __aligned(8);
struct bch_replicas_entry {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
- __u8 devs[];
+ __u8 devs[0];
} __packed;
#define replicas_entry_bytes(_i) \
struct bch_sb_field_replicas {
struct bch_sb_field field;
- struct bch_replicas_entry entries[];
+ struct bch_replicas_entry entries[0];
} __packed __aligned(8);
/* BCH_SB_FIELD_quota: */
u->k.p.snapshot = write
? 0 : U32_MAX;
} else {
- u64 min_packed = f->field_offset[BKEY_FIELD_SNAPSHOT];
+ u64 min_packed = le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]);
u64 max_packed = min_packed +
~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);
return (u16) v;
}
-__always_inline
-static inline void make_bfloat(struct btree *b, struct bset_tree *t,
- unsigned j,
- struct bkey_packed *min_key,
- struct bkey_packed *max_key)
+static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
+ unsigned j,
+ struct bkey_packed *min_key,
+ struct bkey_packed *max_key)
{
struct bkey_float *f = bkey_float(b, t, j);
struct bkey_packed *m = tree_to_bkey(b, t, j);
case KEY_TYPE_btree_ptr:
return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
case KEY_TYPE_btree_ptr_v2:
- return bkey_i_to_btree_ptr_v2_c(k)->v.seq;
+ /*
+ * The cast/deref is only necessary to avoid sparse endianness
+ * warnings:
+ */
+ return *((u64 *) &bkey_i_to_btree_ptr_v2_c(k)->v.seq);
default:
return 0;
}
for_each_member_device(ca, c, dev) {
struct bch_dev_usage *dst = ca->usage_base;
struct bch_dev_usage *src = (void *)
- bch2_acc_percpu_u64s((void *) ca->usage_gc,
+ bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
dev_usage_u64s());
copy_dev_field(buckets_ec, "buckets_ec");
unsigned nr = fs_usage_u64s(c);
struct bch_fs_usage *dst = c->usage_base;
struct bch_fs_usage *src = (void *)
- bch2_acc_percpu_u64s((void *) c->usage_gc, nr);
+ bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
copy_fs_field(hidden, "hidden");
copy_fs_field(btree, "btree");
unsigned nr;
void *buf[BCH_REPLICAS_MAX];
struct bio *bio[BCH_REPLICAS_MAX];
- int err[BCH_REPLICAS_MAX];
+ blk_status_t err[BCH_REPLICAS_MAX];
};
static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}
-const char * const bch2_btree_write_types[] = {
+static const char * const bch2_btree_write_types[] = {
#define x(t, n) [n] = #t,
BCH_BTREE_WRITE_TYPES()
NULL
f->field_offset[BKEY_FIELD_SNAPSHOT] = write
? 0
- : U32_MAX - max_packed;
+ : cpu_to_le64(U32_MAX - max_packed);
}
}
struct btree_node *bn)
{
if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_node_type_is_extents(btree_id) &&
+ btree_id_is_extents(btree_id) &&
!bpos_eq(bn->min_key, POS_MIN) &&
write)
bn->min_key = bpos_nosnap_predecessor(bn->min_key);
bn->max_key.snapshot = U32_MAX;
if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_node_type_is_extents(btree_id) &&
+ btree_id_is_extents(btree_id) &&
!bpos_eq(bn->min_key, POS_MIN) &&
!write)
bn->min_key = bpos_nosnap_successor(bn->min_key);
prt_newline(out);
}
-noinline __cold
+static noinline __cold
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
bool nosort)
{
__bch2_trans_paths_to_text(out, trans, false);
}
-noinline __cold
+static noinline __cold
void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
{
struct printbuf buf = PRINTBUF;
: NULL;
}
-struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bpos end_pos)
+static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end_pos)
{
struct bkey_i *k;
}
__always_inline
-static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
+static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
BUG_ON(err <= 0);
BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));
}
__always_inline
-static inline int btree_trans_restart(struct btree_trans *trans, int err)
+static int btree_trans_restart(struct btree_trans *trans, int err)
{
btree_trans_restart_nounlock(trans, err);
return -err;
return 0;
}
-__flatten
-bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
- struct btree_path *path, unsigned long trace_ip)
-{
- return btree_path_get_locks(trans, path, true);
-}
-
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want)
unsigned level,
enum six_lock_type type)
{
- mark_btree_node_locked_noreset(path, level, type);
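+ /* cast is safe: the read/intent/write locked-type values match enum six_lock_type */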
+ mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[level].lock_taken_time = local_clock();
#endif
trans_for_each_path(trans, path)
if (&path->l[level].b->c == b &&
btree_node_locked_type(path, level) >= want) {
- six_lock_increment(&b->lock, want);
+ six_lock_increment(&b->lock, (enum six_lock_type) want);
return true;
}
EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
if (likely(six_trylock_type(&b->lock, type)) ||
- btree_node_lock_increment(trans, b, level, type) ||
+ btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
!(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[b->level].lock_taken_time = local_clock();
return (1U << type) & BTREE_ID_IS_EXTENTS;
}
+static inline bool btree_id_is_extents(enum btree_id btree)
+{
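+ /* relies on btree node types being numbered the same as btree IDs */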
+ return btree_node_type_is_extents((enum btree_node_type) btree);
+}
+
#define BTREE_ID_HAS_SNAPSHOTS \
((1U << BTREE_ID_extents)| \
(1U << BTREE_ID_inodes)| \
return ret;
}
-void async_btree_node_rewrite_work(struct work_struct *work)
+static void async_btree_node_rewrite_work(struct work_struct *work)
{
struct async_btree_rewrite *a =
container_of(work, struct async_btree_rewrite, work);
* bch2_btree_path_peek_slot() for a cached iterator might return a key in a
* different snapshot:
*/
-struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
+static struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);
struct bch_replicas_entry *r, s64 sectors,
unsigned journal_seq, bool gc)
{
- struct bch_fs_usage __percpu *fs_usage;
+ struct bch_fs_usage *fs_usage;
int idx, ret = 0;
struct printbuf buf = PRINTBUF;
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bch_fs_usage __percpu *fs_usage;
+ struct bch_fs_usage *fs_usage;
u64 journal_seq = trans->journal_res.seq;
if (flags & BTREE_TRIGGER_INSERT) {
{
struct bch_fs *c = trans->c;
struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
- struct bch_fs_usage __percpu *fs_usage;
+ struct bch_fs_usage *fs_usage;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
state.type = type;
bch2_checksum_init(&state);
- state.seed = a.lo;
+ state.seed = (u64 __force) a.lo;
BUG_ON(!bch2_checksum_mergeable(type));
page_address(ZERO_PAGE(0)), b);
b_len -= b;
}
- a.lo = bch2_checksum_final(&state);
+ a.lo = (__le64 __force) bch2_checksum_final(&state);
a.lo ^= b.lo;
a.hi ^= b.hi;
return a;
if (ret)
goto out;
- crypt->key.magic = BCH_KEY_MAGIC;
+ crypt->key.magic = cpu_to_le64(BCH_KEY_MAGIC);
crypt->key.key = key;
SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
if (ret)
goto err;
- key.magic = BCH_KEY_MAGIC;
+ key.magic = cpu_to_le64(BCH_KEY_MAGIC);
get_random_bytes(&key.key, sizeof(key.key));
if (keyed) {
/* BCH_SB_FIELD_counters */
-const char * const bch2_counter_names[] = {
+static const char * const bch2_counter_names[] = {
#define x(t, n, ...) (#t),
BCH_PERSISTENT_COUNTERS()
#undef x
return 0;
};
-void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
+static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
struct bch_sb_field *f)
{
struct bch_sb_field_counters *ctrs = field_to_type(f, counters);
int ret = 0;
if (d.v->d_type == DT_SUBVOL &&
- d.v->d_parent_subvol != dir.subvol)
+ le32_to_cpu(d.v->d_parent_subvol) != dir.subvol)
return 1;
if (likely(d.v->d_type != DT_SUBVOL)) {
}
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
- unsigned rw, unsigned idx, struct closure *cl)
+ blk_opf_t opf, unsigned idx, struct closure *cl)
{
struct bch_stripe *v = &buf->key.v;
unsigned offset = 0, bytes = buf->size << 9;
enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant
? BCH_DATA_user
: BCH_DATA_parity;
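+ /* data direction derived from the op flags: READ (0) or WRITE (1) */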
+ int rw = op_is_write(opf);
if (ptr_stale(ca, ptr)) {
bch_err_ratelimited(c,
ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
nr_iovecs,
- rw,
+ opf,
GFP_KERNEL,
&c->ec_bioset),
struct ec_bio, bio);
mutex_unlock(&h->lock);
}
-struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
- unsigned target,
- unsigned algo,
- unsigned redundancy,
- enum bch_watermark watermark)
+static struct ec_stripe_head *
+__bch2_ec_stripe_head_get(struct btree_trans *trans,
+ unsigned target,
+ unsigned algo,
+ unsigned redundancy,
+ enum bch_watermark watermark)
{
struct bch_fs *c = trans->c;
struct ec_stripe_head *h;
}
BUG_ON(h->s->existing_stripe.size != h->blocksize);
- BUG_ON(h->s->existing_stripe.size != h->s->existing_stripe.key.v.sectors);
+ BUG_ON(h->s->existing_stripe.size != le16_to_cpu(h->s->existing_stripe.key.v.sectors));
/*
* Free buckets we initially allocated - they might conflict with
compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);
if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_node_type_is_extents(btree_id) &&
+ btree_id_is_extents(btree_id) &&
!bkey_eq(bp.v->min_key, POS_MIN))
bp.v->min_key = write
? bpos_nosnap_predecessor(bp.v->min_key)
switch (type) {
case BCH_EXTENT_ENTRY_crc32:
set_common_fields(dst->crc32, src);
- dst->crc32.csum = *((__le32 *) &src.csum.lo);
+ dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
break;
case BCH_EXTENT_ENTRY_crc64:
set_common_fields(dst->crc64, src);
dst->crc64.nonce = src.nonce;
- dst->crc64.csum_lo = src.csum.lo;
- dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
+ dst->crc64.csum_lo = (u64 __force) src.csum.lo;
+ dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi);
break;
case BCH_EXTENT_ENTRY_crc128:
set_common_fields(dst->crc128, src);
common_fields(crc->crc32),
};
- *((__le32 *) &ret.csum.lo) = crc->crc32.csum;
-
- memcpy(&ret.csum.lo, &crc->crc32.csum,
- sizeof(crc->crc32.csum));
-
+ *((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
return ret;
}
case BCH_EXTENT_ENTRY_crc64: {
.csum.lo = (__force __le64) crc->crc64.csum_lo,
};
- *((__le16 *) &ret.csum.hi) = crc->crc64.csum_hi;
+ *((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;
return ret;
}
#undef x
};
-const char * const bch2_folio_sector_states[] = {
+static const char * const bch2_folio_sector_states[] = {
#define x(n) #n,
BCH_FOLIO_SECTOR_STATE()
#undef x
struct address_space *mapping = file->f_mapping;
struct address_space *fdm = faults_disabled_mapping();
struct bch_inode_info *inode = file_bch_inode(file);
- int ret;
+ vm_fault_t ret;
if (fdm == mapping)
return VM_FAULT_SIGBUS;
struct bch2_folio_reservation res;
unsigned len;
loff_t isize;
- int ret;
+ vm_fault_t ret;
bch2_folio_reservation_init(c, inode, &res);
goto err;
if (fsck_err_on(ret, c,
- "dirent points to missing subvolume %llu",
- le64_to_cpu(d.v->d_child_subvol))) {
+ "dirent points to missing subvolume %u",
+ le32_to_cpu(d.v->d_child_subvol))) {
ret = __remove_dirent(trans, d.k->p);
goto err;
}
* Backpointer and directory structure checks are sufficient for
* directories, since they can't have hardlinks:
*/
- if (S_ISDIR(le16_to_cpu(u.bi_mode)))
+ if (S_ISDIR(u.bi_mode))
continue;
if (!u.bi_nlink)
BUG_ON(bch2_inode_unpack(k, &u));
- if (S_ISDIR(le16_to_cpu(u.bi_mode)))
+ if (S_ISDIR(u.bi_mode))
return 0;
if (!u.bi_nlink)
op->end_io(op);
}
-const char * const bch2_write_flags[] = {
+static const char * const bch2_write_flags[] = {
#define x(f) #f,
BCH_WRITE_FLAGS()
#undef x
spin_unlock(&j->lock);
}
-enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
+static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
if (fn == bch2_btree_node_flush0 ||
fn == bch2_btree_node_flush1)
bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
- j->d[dst].start = le64_to_cpu(buckets[0]);
- j->d[dst].nr = le64_to_cpu(1);
+ j->d[dst].start = cpu_to_le64(buckets[0]);
+ j->d[dst].nr = cpu_to_le64(1);
for (i = 1; i < nr; i++) {
if (buckets[i] == buckets[i - 1] + 1) {
le64_add_cpu(&j->d[dst].nr, 1);
} else {
dst++;
- j->d[dst].start = le64_to_cpu(buckets[i]);
- j->d[dst].nr = le64_to_cpu(1);
+ j->d[dst].start = cpu_to_le64(buckets[i]);
+ j->d[dst].nr = cpu_to_le64(1);
}
}
}
if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
- mq->c[Q_SPC].timer = cpu_to_le64(qdq->d_spc_timer);
+ mq->c[Q_SPC].timer = qdq->d_spc_timer;
if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
- mq->c[Q_SPC].warns = cpu_to_le64(qdq->d_spc_warns);
+ mq->c[Q_SPC].warns = qdq->d_spc_warns;
if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
- mq->c[Q_INO].timer = cpu_to_le64(qdq->d_ino_timer);
+ mq->c[Q_INO].timer = qdq->d_ino_timer;
if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
- mq->c[Q_INO].warns = cpu_to_le64(qdq->d_ino_warns);
+ mq->c[Q_INO].warns = qdq->d_ino_warns;
mutex_unlock(&q->lock);
}
}
}
-struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
+static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
struct journal_key *k = iter->keys->d + iter->idx;
root_snapshot.k.p.offset = U32_MAX;
root_snapshot.v.flags = 0;
root_snapshot.v.parent = 0;
- root_snapshot.v.subvol = BCACHEFS_ROOT_SUBVOL;
+ root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
root_snapshot.v.tree = cpu_to_le32(1);
SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done)) ||
- le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
+ c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
struct bch_move_stats stats;
bch2_move_stats_init(&stats, "recovery");
eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}
-void bch2_replicas_entry_v0_to_text(struct printbuf *out,
- struct bch_replicas_entry_v0 *e)
+static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
+ struct bch_replicas_entry_v0 *e)
{
unsigned i;
{
unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
struct bch_fs_usage *dst, *src = (void *)
- bch2_acc_percpu_u64s((void *) src_p, src_nr);
+ bch2_acc_percpu_u64s((u64 __percpu *) src_p, src_nr);
preempt_disable();
dst = this_cpu_ptr(dst_p);
goto err;
if (s.v->children[0]) {
- s_t->v.root_snapshot = cpu_to_le32(s.v->children[0]);
+ s_t->v.root_snapshot = s.v->children[0];
} else {
s_t->k.type = KEY_TYPE_deleted;
set_bkey_val_u64s(&s_t->k, 0);
__bch2_subvolume_delete(trans, subvolid));
}
-void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
+static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs,
snapshot_wait_for_pagecache_and_delete_work);
u32 subvol;
};
-int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
+static int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
struct btree_trans_commit_hook *_h)
{
struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
goto err;
/* Compat: */
- if (sb->version <= bcachefs_metadata_version_inode_v2 &&
+ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
!BCH_SB_JOURNAL_FLUSH_DELAY(sb))
SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
- if (sb->version <= bcachefs_metadata_version_inode_v2 &&
+ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
!BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
-unsigned bch2_metadata_version = bcachefs_metadata_version_current;
+static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
module_param_named(version, bch2_metadata_version, uint, 0400);
module_exit(bcachefs_exit);
#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);
-const char * const bch2_write_refs[] = {
+static const char * const bch2_write_refs[] = {
#define x(n) #n,
BCH_WRITE_REFS()
#undef x
/* extent unit tests */
-u64 test_version;
+static u64 test_version;
static int insert_test_extent(struct bch_fs *c,
u64 start, u64 end)
{
unsigned bits = fls64(v|1);
unsigned bytes = DIV_ROUND_UP(bits, 7);
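+ /* encode into an explicitly little-endian temporary before the byte-wise copy */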
+ __le64 v_le;
if (likely(bytes < 9)) {
v <<= bytes;
v |= ~(~0 << (bytes - 1));
- v = cpu_to_le64(v);
- memcpy(out, &v, bytes);
+ v_le = cpu_to_le64(v);
+ memcpy(out, &v_le, bytes);
} else {
*out++ = 255;
bytes = 9;
return -1;
if (likely(bytes < 9)) {
- v = 0;
- memcpy(&v, in, bytes);
- v = le64_to_cpu(v);
+ __le64 v_le = 0;
+ memcpy(&v_le, in, bytes);
+ v = le64_to_cpu(v_le);
v >>= bytes;
} else {
v = get_unaligned_le64(++in);
const char *name, void *buffer, size_t size, int type)
{
struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
+ struct xattr_search_key search = X_SEARCH(type, name, strlen(name));
struct btree_iter iter;
struct bkey_s_c_xattr xattr;
struct bkey_s_c k;
int ret;
ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash,
- inode_inum(inode),
- &X_SEARCH(type, name, strlen(name)),
- 0);
+ inode_inum(inode), &search, 0);
if (ret)
goto err1;