set_btree_node_dirty(b);
}
-static enum btree_insert_ret
-bch2_insert_fixup_key(struct btree_insert *trans,
- struct btree_insert_entry *insert)
+static void bch2_insert_fixup_key(struct btree_insert *trans,
+ struct btree_insert_entry *insert)
{
struct btree_iter *iter = insert->iter;
struct btree_iter_level *l = &iter->l[0];
if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
insert->k))
bch2_btree_journal_key(trans, iter, insert->k);
-
- return BTREE_INSERT_OK;
}
/**
* btree_insert_key_leaf - insert one key into a leaf node
*/
-static enum btree_insert_ret
-btree_insert_key_leaf(struct btree_insert *trans,
- struct btree_insert_entry *insert)
+static void btree_insert_key_leaf(struct btree_insert *trans,
+ struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
struct btree *b = iter->l[0].b;
- enum btree_insert_ret ret;
int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
bch2_mark_update(trans, insert);
- ret = !btree_node_is_extents(b)
- ? bch2_insert_fixup_key(trans, insert)
- : bch2_insert_fixup_extent(trans, insert);
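+ /* extent inserts have to handle overlaps with existing extents: */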
+ if (!btree_node_is_extents(b))
+ bch2_insert_fixup_key(trans, insert);
+ else
+ bch2_insert_fixup_extent(trans, insert);
live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
bch2_btree_iter_reinit_node(iter, b);
trace_btree_insert_key(c, b, insert->k);
- return ret;
}
/* Deferred btree updates: */
kfree(k);
}
-static enum btree_insert_ret
-btree_insert_key_deferred(struct btree_insert *trans,
- struct btree_insert_entry *insert)
+static void btree_insert_key_deferred(struct btree_insert *trans,
+ struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
bch2_journal_pin_update(j, trans->journal_res.seq, &d->journal,
deferred_update_flush);
spin_unlock(&d->lock);
-
- return BTREE_INSERT_OK;
}
void bch2_deferred_update_free(struct bch_fs *c,
return BTREE_INSERT_OK;
}
-static inline enum btree_insert_ret
-do_btree_insert_one(struct btree_insert *trans,
- struct btree_insert_entry *insert)
+static inline void do_btree_insert_one(struct btree_insert *trans,
+ struct btree_insert_entry *insert)
{
- return likely(!insert->deferred)
- ? btree_insert_key_leaf(trans, insert)
- : btree_insert_key_deferred(trans, insert);
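+ /*
+ * Deferred keys are only journalled here; the btree itself is
+ * updated later, by deferred_update_flush():
+ */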
+ if (likely(!insert->deferred))
+ btree_insert_key_leaf(trans, insert);
+ else
+ btree_insert_key_deferred(trans, insert);
}
/*
}
trans->did_work = true;
- trans_for_each_entry(trans, i) {
- switch (do_btree_insert_one(trans, i)) {
- case BTREE_INSERT_OK:
- break;
- case BTREE_INSERT_NEED_TRAVERSE:
- BUG_ON((trans->flags &
- (BTREE_INSERT_ATOMIC|BTREE_INSERT_NOUNLOCK)));
- ret = -EINTR;
- goto out;
- default:
- BUG();
- }
- }
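+ /*
+ * Since extents are trimmed to be atomic before we get here, the
+ * insert can no longer fail partway through with
+ * BTREE_INSERT_NEED_TRAVERSE:
+ */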
+ trans_for_each_entry(trans, i)
+ do_btree_insert_one(trans, i);
out:
BUG_ON(ret &&
(trans->flags & BTREE_INSERT_JOURNAL_RESERVED) &&
if (!i->deferred) {
BUG_ON(i->iter->level);
BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
+ EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ !bch2_extent_is_atomic(i->k, i->iter));
bch2_btree_iter_verify_locks(i->iter);
}
BTREE_INSERT_ENTRY(iter, &k));
}
-int bch2_btree_insert_list_at(struct btree_iter *iter,
- struct keylist *keys,
- struct disk_reservation *disk_res,
- u64 *journal_seq, unsigned flags)
-{
- BUG_ON(flags & BTREE_INSERT_ATOMIC);
- BUG_ON(bch2_keylist_empty(keys));
- bch2_verify_keylist_sorted(keys);
-
- while (!bch2_keylist_empty(keys)) {
- int ret = bch2_btree_insert_at(iter->c, disk_res,
- journal_seq, flags,
- BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
- if (ret)
- return ret;
-
- bch2_keylist_pop_front(keys);
- }
-
- return 0;
-}
-
/**
* bch2_btree_insert - insert keys into the extent btree
* @c: pointer to struct bch_fs
/* create the biggest key we can */
bch2_key_resize(&delete.k, max_sectors);
bch2_cut_back(end, &delete.k);
+ bch2_extent_trim_atomic(&delete, &iter);
}
ret = bch2_btree_insert_at(c, NULL, journal_seq,
insert->k.needs_whiteout = false;
}
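+/*
+ * An extent insert is atomic if it won't have to be split across leaf
+ * nodes: its end must not be past the end of the iterator's current node.
+ */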
-void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
+static inline struct bpos
+bch2_extent_atomic_end(struct bkey_i *k, struct btree_iter *iter)
{
struct btree *b = iter->l[0].b;
BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
- bch2_cut_back(b->key.k.p, &k->k);
- BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0);
+ BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0);
+ return bpos_min(k->k.p, b->key.k.p);
+}
+void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
+{
+ bch2_cut_back(bch2_extent_atomic_end(k, iter), &k->k);
+}
+
+bool bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
+{
+ return !bkey_cmp(bch2_extent_atomic_end(k, iter), k->k.p);
}
enum btree_insert_ret
struct bkey_s_c k;
int sectors;
- BUG_ON(trans->flags & BTREE_INSERT_ATOMIC &&
- !bch2_extent_is_atomic(&insert->k->k, insert->iter));
-
/*
* We avoid creating whiteouts whenever possible when deleting, but
* those optimizations mean we may potentially insert two whiteouts
* If, on return, iter->pos is not at the end of insert, then key
* insertion needs to continue/be retried.
*/
-enum btree_insert_ret
-bch2_insert_fixup_extent(struct btree_insert *trans,
- struct btree_insert_entry *insert)
+void bch2_insert_fixup_extent(struct btree_insert *trans,
+ struct btree_insert_entry *insert)
{
struct btree_iter *iter = insert->iter;
- struct btree *b = iter->l[0].b;
struct extent_insert_state s = {
.trans = trans,
.insert = insert,
extent_insert_committed(&s);
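+ /* a trimmed (atomic) extent insert must now be fully committed: */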
+ BUG_ON(insert->k->k.size);
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
EBUG_ON(bkey_cmp(iter->pos, s.committed));
-
- if (insert->k->k.size) {
- /* got to the end of this leaf node */
- BUG_ON(bkey_cmp(iter->pos, b->key.k.p));
- return BTREE_INSERT_NEED_TRAVERSE;
- }
-
- return BTREE_INSERT_OK;
}
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
int bch2_write_index_default(struct bch_write_op *op)
{
+ struct bch_fs *c = op->c;
struct keylist *keys = &op->insert_keys;
struct btree_iter iter;
int ret;
- bch2_btree_iter_init(&iter, op->c, BTREE_ID_EXTENTS,
+ bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
bkey_start_pos(&bch2_keylist_front(keys)->k),
BTREE_ITER_INTENT);
- ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
- op_journal_seq(op),
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE);
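+ /*
+ * Insert keys one at a time, trimming each one to what can be
+ * inserted atomically into the iterator's current leaf node:
+ */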
+ do {
+ BKEY_PADDED(k) split;
+
+ bkey_copy(&split.k, bch2_keylist_front(keys));
+
+ bch2_extent_trim_atomic(&split.k, &iter);
+
+ ret = bch2_btree_insert_at(c, &op->res,
+ op_journal_seq(op),
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_USE_RESERVE,
+ BTREE_INSERT_ENTRY(&iter, &split.k));
+ if (ret)
+ break;
+
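+ /* if the key was trimmed, insert the remainder on the next iteration: */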
+ if (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) < 0)
+ bch2_cut_front(iter.pos, bch2_keylist_front(keys));
+ else
+ bch2_keylist_pop_front(keys);
+ } while (!bch2_keylist_empty(keys));
+
bch2_btree_iter_unlock(&iter);
return ret;