bch2_trans_init(&trans, c);
- for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
bch2_alloc_read_key(c, k);
- bch2_trans_cond_resched(&trans);
- }
- ret = bch2_trans_exit(&trans);
- if (ret)
+ ret = bch2_trans_exit(&trans) ?: ret;
+ if (ret) {
+ bch_err(c, "error reading alloc info: %i", ret);
return ret;
+ }
for_each_journal_key(*journal_keys, j)
if (j->btree_id == BTREE_ID_ALLOC)
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, id, bkey_start_pos(&insert->k),
- BTREE_ITER_SLOTS, k) {
+ BTREE_ITER_SLOTS, k, ret) {
percpu_down_read(&c->mark_lock);
ret = bch2_mark_overwrite(&trans, iter, k, insert, NULL,
BCH_BUCKET_MARK_GC|
break;
}
- return bch2_trans_exit(&trans);
+ return bch2_trans_exit(&trans) ?: ret;
}
static int bch2_gc_btrees(struct bch_fs *c, struct journal_keys *journal_keys,
: bch2_btree_iter_next(iter);
}
-#define for_each_btree_key(_trans, _iter, _btree_id, _start, _flags, _k)\
- for (iter = bch2_trans_get_iter((_trans), (_btree_id), \
- (_start), (_flags)), \
- (_k) = __bch2_btree_iter_peek(_iter, _flags); \
- !IS_ERR_OR_NULL((_k).k); \
- (_k) = __bch2_btree_iter_next(_iter, _flags))
+#define for_each_btree_key(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _ret) \
+ for ((_ret) = PTR_ERR_OR_ZERO((_iter) = \
+ bch2_trans_get_iter((_trans), (_btree_id), \
+ (_start), (_flags))) ?: \
+ PTR_ERR_OR_ZERO(((_k) = \
+ __bch2_btree_iter_peek(_iter, _flags)).k); \
	     !(_ret) && (_k).k;						\
+ (_ret) = PTR_ERR_OR_ZERO(((_k) = \
+ __bch2_btree_iter_next(_iter, _flags)).k))
#define for_each_btree_key_continue(_iter, _flags, _k) \
for ((_k) = __bch2_btree_iter_peek(_iter, _flags); \
return ret;
}
-inline bool bch2_mark_overwrite(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c old,
- struct bkey_i *new,
- struct bch_fs_usage *fs_usage,
- unsigned flags)
+inline int bch2_mark_overwrite(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ struct bch_fs_usage *fs_usage,
+ unsigned flags)
{
struct bch_fs *c = trans->c;
struct btree *b = iter->l[0].b;
if (btree_node_is_extents(b)
? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
: bkey_cmp(new->k.p, old.k->p))
- return false;
+ return 0;
if (btree_node_is_extents(b)) {
switch (bch2_extent_overlap(&new->k, old.k)) {
BUG_ON(sectors >= 0);
}
- bch2_mark_key_locked(c, old, false, sectors,
- fs_usage, trans->journal_res.seq, flags);
- return true;
+ return bch2_mark_key_locked(c, old, false, sectors, fs_usage,
+ trans->journal_res.seq, flags) ?: 1;
}
-void bch2_mark_update(struct btree_trans *trans,
- struct btree_insert_entry *insert,
- struct bch_fs_usage *fs_usage,
- unsigned flags)
+int bch2_mark_update(struct btree_trans *trans,
+ struct btree_insert_entry *insert,
+ struct bch_fs_usage *fs_usage,
+ unsigned flags)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
struct bkey_packed *_k;
+ int ret = 0;
if (!btree_node_type_needs_gc(iter->btree_id))
- return;
+ return 0;
bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
bpos_min(insert->k->k.p, b->key.k.p).offset -
fs_usage, trans->journal_res.seq, flags);
if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
- return;
+ return 0;
/*
* For non extents, we only mark the new key, not the key being
if ((iter->btree_id == BTREE_ID_ALLOC ||
iter->btree_id == BTREE_ID_EC) &&
!bkey_deleted(&insert->k->k))
- return;
+ return 0;
while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
KEY_TYPE_discard))) {
struct bkey unpacked;
struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
- if (!bch2_mark_overwrite(trans, iter, k, insert->k,
- fs_usage, flags))
+ ret = bch2_mark_overwrite(trans, iter, k, insert->k,
+ fs_usage, flags);
+ if (ret <= 0)
break;
bch2_btree_node_iter_advance(&node_iter, b);
}
+
+ return ret;
}
void bch2_trans_fs_usage_apply(struct btree_trans *trans,
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
struct disk_reservation *);
-bool bch2_mark_overwrite(struct btree_trans *, struct btree_iter *,
- struct bkey_s_c, struct bkey_i *,
- struct bch_fs_usage *, unsigned);
-void bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
- struct bch_fs_usage *, unsigned);
+int bch2_mark_overwrite(struct btree_trans *, struct btree_iter *,
+ struct bkey_s_c, struct bkey_i *,
+ struct bch_fs_usage *, unsigned);
+int bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
+ struct bch_fs_usage *, unsigned);
void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage *);
/* disk reservations: */
{
struct btree_iter *iter;
struct bkey_s_c k;
- int ret = 0;
-
- iter = bch2_trans_get_iter(trans, BTREE_ID_DIRENTS,
- POS(dir_inum, 0), 0);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
+ int ret;
- for_each_btree_key_continue(iter, 0, k) {
+ for_each_btree_key(trans, iter, BTREE_ID_DIRENTS,
+ POS(dir_inum, 0), 0, k, ret) {
if (k.k->p.inode > dir_inum)
break;
struct bkey_s_c k;
struct bkey_s_c_dirent dirent;
unsigned len;
+ int ret;
if (!dir_emit_dots(file, ctx))
return 0;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
- POS(inode->v.i_ino, ctx->pos), 0, k) {
+ POS(inode->v.i_ino, ctx->pos), 0, k, ret) {
if (k.k->type != KEY_TYPE_dirent)
continue;
ctx->pos = k.k->p.offset + 1;
}
- bch2_trans_exit(&trans);
+ ret = bch2_trans_exit(&trans) ?: ret;
- return 0;
+ return ret;
}
bch2_trans_begin(&trans);
/* XXX: start pos hint */
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-
- for_each_btree_key_continue(iter, BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN,
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0)
break;
goto found_slot;
}
- ret = -ENOSPC;
+ if (!ret)
+ ret = -ENOSPC;
goto out;
found_slot:
ret = ec_stripe_mem_alloc(c, iter);
bch2_trans_init(&trans, c);
- for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN, 0, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN, 0, k, ret)
bch2_stripe_read_key(c, k);
- bch2_trans_cond_resched(&trans);
- }
- ret = bch2_trans_exit(&trans);
- if (ret)
+ ret = bch2_trans_exit(&trans) ?: ret;
+ if (ret) {
+ bch_err(c, "error reading stripes: %i", ret);
return ret;
+ }
for_each_journal_key(*journal_keys, i)
if (i->btree_id == BTREE_ID_EC)
struct bpos end = pos;
struct bkey_s_c k;
bool ret = true;
+ int err;
end.offset += size;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
- BTREE_ITER_SLOTS, k) {
+ BTREE_ITER_SLOTS, k, err) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
bch2_trans_init(&trans, c);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
- POS(inode->v.i_ino, offset >> 9), 0, k) {
+ POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
break;
} else if (bkey_extent_is_data(k.k)) {
break;
}
- ret = bch2_trans_exit(&trans);
+ ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
return ret;
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode->v.i_ino, offset >> 9),
- BTREE_ITER_SLOTS, k) {
+ BTREE_ITER_SLOTS, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
next_hole = bch2_next_pagecache_hole(&inode->v,
offset, MAX_LFS_FILESIZE);
}
}
- ret = bch2_trans_exit(&trans);
+ ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
return ret;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
- POS(ei->v.i_ino, start >> 9), 0, k)
+ POS(ei->v.i_ino, start >> 9), 0, k, ret)
if (bkey_extent_is_data(k.k) ||
k.k->type == KEY_TYPE_reservation) {
if (bkey_cmp(bkey_start_pos(k.k),
if (have_extent) {
ret = bch2_fill_extent(info, &tmp.k, 0);
if (ret)
- goto out;
+ break;
}
bkey_reassemble(&tmp.k, k);
have_extent = true;
}
- if (have_extent)
+ if (!ret && have_extent)
ret = bch2_fill_extent(info, &tmp.k, FIEMAP_EXTENT_LAST);
-out:
- bch2_trans_exit(&trans);
+
+ ret = bch2_trans_exit(&trans) ?: ret;
return ret < 0 ? ret : 0;
}
struct btree_iter *iter;
struct bkey_s_c k;
u64 sectors = 0;
+ int ret;
- for_each_btree_key(trans, iter, BTREE_ID_EXTENTS, POS(inum, 0), 0, k) {
+ for_each_btree_key(trans, iter, BTREE_ID_EXTENTS,
+ POS(inum, 0), 0, k, ret) {
if (k.k->p.inode != inum)
break;
sectors += k.k->size;
}
- return bch2_trans_iter_free(trans, iter) ?: sectors;
+ bch2_trans_iter_free(trans, iter);
+
+ return ret ?: sectors;
}
static int remove_dirent(struct btree_trans *trans,
goto up;
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
- POS(e->inum, e->offset + 1), 0, k) {
+ POS(e->inum, e->offset + 1), 0, k, ret) {
if (k.k->p.inode != e->inum)
break;
}
goto next;
}
- ret = bch2_trans_iter_free(&trans, iter);
+ ret = bch2_trans_iter_free(&trans, iter) ?: ret;
if (ret) {
bch_err(c, "btree error %i in fsck", ret);
goto err;
inc_link(c, links, range_start, range_end, BCACHEFS_ROOT_INO, false);
- for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret) {
switch (k.k->type) {
case KEY_TYPE_dirent:
d = bkey_s_c_to_dirent(k);
bch2_trans_cond_resched(&trans);
}
- ret = bch2_trans_exit(&trans);
+ ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
bch_err(c, "error in fs gc: btree error %i while walking dirents", ret);
struct btree_iter *iter;
struct bkey_s_c k;
struct bkey_s_c_inode inode;
- int ret = 0, ret2;
+ int ret;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
- POS_MIN, 0);
-
- for_each_btree_key_continue(iter, 0, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_inode)
continue;
break;
}
}
+ BUG_ON(ret == -EINTR);
- ret2 = bch2_trans_exit(&trans);
-
- return ret ?: ret2;
+ return bch2_trans_exit(&trans) ?: ret;
}
/*
retry:
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode, bvec_iter.bi_sector),
- BTREE_ITER_SLOTS, k) {
+ BTREE_ITER_SLOTS, k, ret) {
BKEY_PADDED(k) tmp;
unsigned bytes;
* If we get here, it better have been because there was an error
* reading a btree node
*/
- BUG_ON(!btree_iter_err(iter));
- __bcache_io_error(c, "btree IO error");
+ BUG_ON(!ret);
+ __bcache_io_error(c, "btree IO error: %i", ret);
err:
rbio->bio.bi_status = BLK_STS_IOERR;
out:
unsigned flags = BCH_READ_RETRY_IF_STALE|
BCH_READ_MAY_PROMOTE|
BCH_READ_USER_MAPPED;
+ int ret;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode, rbio->bio.bi_iter.bi_sector),
- BTREE_ITER_SLOTS, k) {
+ BTREE_ITER_SLOTS, k, ret) {
BKEY_PADDED(k) tmp;
unsigned bytes;
* If we get here, it better have been because there was an error
* reading a btree node
*/
- BUG_ON(!btree_iter_err(iter));
- bcache_io_error(c, &rbio->bio, "btree IO error");
+ BUG_ON(!ret);
+ bcache_io_error(c, &rbio->bio, "btree IO error: %i", ret);
bch2_trans_exit(&trans);
bch2_rbio_done(rbio);
bch2_replicas_gc_start(c, (1 << BCH_DATA_USER)|(1 << BCH_DATA_CACHED));
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
- BTREE_ITER_PREFETCH, k) {
+ BTREE_ITER_PREFETCH, k, ret) {
ret = bch2_mark_bkey_replicas(c, k);
if (ret)
break;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_QUOTAS, POS(type, 0),
- BTREE_ITER_PREFETCH, k) {
+ BTREE_ITER_PREFETCH, k, ret) {
if (k.k->p.inode != type)
break;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN,
- BTREE_ITER_PREFETCH, k) {
+ BTREE_ITER_PREFETCH, k, ret) {
switch (k.k->type) {
case KEY_TYPE_inode:
ret = bch2_inode_unpack(bkey_s_c_to_inode(k), &u);
{
struct btree_iter *iter;
struct bkey_s_c k;
+ int ret;
- iter = bch2_trans_get_iter(trans, desc.btree_id,
- POS(inode, desc.hash_key(info, key)),
- BTREE_ITER_SLOTS|flags);
- if (IS_ERR(iter))
- return iter;
-
- for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+ for_each_btree_key(trans, iter, desc.btree_id,
+ POS(inode, desc.hash_key(info, key)),
+ BTREE_ITER_SLOTS|flags, k, ret) {
if (iter->pos.inode != inode)
break;
}
}
- return IS_ERR(k.k) ? ERR_CAST(k.k) : ERR_PTR(-ENOENT);
+ return ERR_PTR(ret ?: -ENOENT);
}
static __always_inline struct btree_iter *
{
struct btree_iter *iter;
struct bkey_s_c k;
+ int ret;
- iter = bch2_trans_get_iter(trans, desc.btree_id,
- POS(inode, desc.hash_key(info, key)),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- if (IS_ERR(iter))
- return iter;
-
- for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+ for_each_btree_key(trans, iter, desc.btree_id,
+ POS(inode, desc.hash_key(info, key)),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (iter->pos.inode != inode)
break;
return iter;
}
- return IS_ERR(k.k) ? ERR_CAST(k.k) : ERR_PTR(-ENOSPC);
+ return ERR_PTR(ret ?: -ENOSPC);
}
static __always_inline
struct btree_iter *iter, *slot = NULL;
struct bkey_s_c k;
bool found = false;
- int ret = 0;
-
- iter = bch2_trans_get_iter(trans, desc.btree_id,
- POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
+ int ret;
- for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+ for_each_btree_key(trans, iter, desc.btree_id,
+ POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (iter->pos.inode != inode)
break;
}
if (slot)
- bch2_trans_iter_free(trans, iter);
+ bch2_trans_iter_free(trans, slot);
+ bch2_trans_iter_free(trans, iter);
- return bch2_trans_iter_free(trans, iter) ?: -ENOSPC;
+ return ret ?: -ENOSPC;
found:
found = true;
not_found:
nr_compressed_extents = 0,
compressed_sectors_compressed = 0,
compressed_sectors_uncompressed = 0;
+ int ret;
if (!test_bit(BCH_FS_STARTED, &c->flags))
return -EPERM;
bch2_trans_init(&trans, c);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k)
+ for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k, ret)
if (k.k->type == KEY_TYPE_extent) {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const union bch_extent_entry *entry;
break;
}
}
- bch2_trans_exit(&trans);
+
+ ret = bch2_trans_exit(&trans) ?: ret;
+ if (ret)
+ return ret;
return scnprintf(buf, PAGE_SIZE,
"uncompressed data:\n"
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(0, 0), 0, k)
+ for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
+ POS_MIN, 0, k, ret)
BUG_ON(k.k->p.offset != i++);
BUG_ON(i != nr);
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
+ POS_MIN, 0, k, ret) {
BUG_ON(bkey_start_offset(k.k) != i);
i = k.k->p.offset;
}
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(0, 0), 0, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
+ 0, k, ret) {
BUG_ON(k.k->p.offset != i);
i += 2;
}
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(0, 0),
- BTREE_ITER_SLOTS, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
+ BTREE_ITER_SLOTS, k, ret) {
BUG_ON(bkey_deleted(k.k) != (i & 1));
BUG_ON(k.k->p.offset != i++);
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
+ 0, k, ret) {
BUG_ON(bkey_start_offset(k.k) != i + 8);
BUG_ON(k.k->size != 8);
i += 16;
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(0, 0),
- BTREE_ITER_SLOTS, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
+ BTREE_ITER_SLOTS, k, ret) {
BUG_ON(bkey_deleted(k.k) != !(i % 16));
BUG_ON(bkey_start_offset(k.k) != i);
bch2_trans_init(&trans, c);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-
- for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
insert.k.p = iter->pos;
bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &insert.k_i));
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
+ int ret;
bch2_trans_init(&trans, c);
- for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k)
+ for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret)
;
bch2_trans_exit(&trans);
}
bch2_trans_init(&trans, c);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
- BTREE_ITER_INTENT);
-
- for_each_btree_key_continue(iter, 0, k) {
+ for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
+ BTREE_ITER_INTENT, k, ret) {
struct bkey_i_cookie u;
bkey_reassemble(&u.k_i, k);
return ret;
}
-static void __bch2_xattr_emit(const char *prefix,
- const char *name, size_t name_len,
- char **buffer, size_t *buffer_size,
- ssize_t *ret)
+struct xattr_buf {
+ char *buf;
+ size_t len;
+ size_t used;
+};
+
+static int __bch2_xattr_emit(const char *prefix,
+ const char *name, size_t name_len,
+ struct xattr_buf *buf)
{
const size_t prefix_len = strlen(prefix);
const size_t total_len = prefix_len + name_len + 1;
- if (*buffer) {
- if (total_len > *buffer_size) {
- *ret = -ERANGE;
- return;
- }
+ if (buf->buf) {
+ if (buf->used + total_len > buf->len)
+ return -ERANGE;
- memcpy(*buffer, prefix, prefix_len);
- memcpy(*buffer + prefix_len,
+ memcpy(buf->buf + buf->used, prefix, prefix_len);
+ memcpy(buf->buf + buf->used + prefix_len,
name, name_len);
- (*buffer)[prefix_len + name_len] = '\0';
-
- *buffer += total_len;
- *buffer_size -= total_len;
+ buf->buf[buf->used + prefix_len + name_len] = '\0';
}
- *ret += total_len;
+ buf->used += total_len;
+ return 0;
}
-static void bch2_xattr_emit(struct dentry *dentry,
+static int bch2_xattr_emit(struct dentry *dentry,
const struct bch_xattr *xattr,
- char **buffer, size_t *buffer_size,
- ssize_t *ret)
+ struct xattr_buf *buf)
{
const struct xattr_handler *handler =
bch2_xattr_type_to_handler(xattr->x_type);
- if (handler && (!handler->list || handler->list(dentry)))
- __bch2_xattr_emit(handler->prefix ?: handler->name,
- xattr->x_name, xattr->x_name_len,
- buffer, buffer_size, ret);
+ return handler && (!handler->list || handler->list(dentry))
+ ? __bch2_xattr_emit(handler->prefix ?: handler->name,
+ xattr->x_name, xattr->x_name_len, buf)
+ : 0;
}
-static void bch2_xattr_list_bcachefs(struct bch_fs *c,
- struct bch_inode_info *inode,
- char **buffer,
- size_t *buffer_size,
- ssize_t *ret,
- bool all)
+static int bch2_xattr_list_bcachefs(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct xattr_buf *buf,
+ bool all)
{
const char *prefix = all ? "bcachefs_effective." : "bcachefs.";
unsigned id;
+ int ret = 0;
u64 v;
for (id = 0; id < Inode_opt_nr; id++) {
!(inode->ei_inode.bi_fields_set & (1 << id)))
continue;
- __bch2_xattr_emit(prefix,
- bch2_inode_opts[id],
- strlen(bch2_inode_opts[id]),
- buffer, buffer_size, ret);
- if (*ret < 0)
+ ret = __bch2_xattr_emit(prefix, bch2_inode_opts[id],
+ strlen(bch2_inode_opts[id]), buf);
+ if (ret)
break;
}
+
+ return ret;
}
ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
+ struct xattr_buf buf = { .buf = buffer, .len = buffer_size };
u64 inum = dentry->d_inode->i_ino;
- ssize_t ret = 0;
+ int ret;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
- POS(inum, 0), 0, k) {
+ POS(inum, 0), 0, k, ret) {
BUG_ON(k.k->p.inode < inum);
if (k.k->p.inode > inum)
if (k.k->type != KEY_TYPE_xattr)
continue;
- bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v,
- &buffer, &buffer_size, &ret);
- if (ret < 0)
+ ret = bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v, &buf);
+ if (ret)
break;
}
- bch2_trans_exit(&trans);
+ ret = bch2_trans_exit(&trans) ?: ret;
- if (ret < 0)
+ if (ret)
return ret;
- bch2_xattr_list_bcachefs(c, inode, &buffer,
- &buffer_size, &ret, false);
- if (ret < 0)
+ ret = bch2_xattr_list_bcachefs(c, inode, &buf, false);
+ if (ret)
return ret;
- bch2_xattr_list_bcachefs(c, inode, &buffer,
- &buffer_size, &ret, true);
- if (ret < 0)
+ ret = bch2_xattr_list_bcachefs(c, inode, &buf, true);
+ if (ret)
return ret;
- return ret;
+ return buf.used;
}
static int bch2_xattr_get_handler(const struct xattr_handler *handler,