};
#define BCH_WATERMARKS() \
- x(reclaim) \
- x(btree_copygc) \
- x(btree) \
- x(copygc) \
+ x(stripe) \
x(normal) \
- x(stripe)
+ x(copygc) \
+ x(btree) \
+ x(btree_copygc) \
+ x(reclaim)
enum bch_watermark {
#define x(name) BCH_WATERMARK_##name,
BCH_WATERMARKS()
#undef x
BCH_WATERMARK_NR,
};
+#define BCH_WATERMARK_BITS 3
+#define BCH_WATERMARK_MASK ~(~0U << BCH_WATERMARK_BITS)
+
#define OPEN_BUCKETS_COUNT 1024
#define WRITE_POINT_HASH_NR 32
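A minimal standalone sketch of the new encoding (not part of the patch itself): with the reordered BCH_WATERMARKS(), the six watermarks run from least strict (stripe = 0) to most strict (reclaim = 5), so they fit in BCH_WATERMARK_BITS = 3 bits and can ride in the low bits of any flags word whose real flag bits start at or above bit 3. EXAMPLE_FLAG below is hypothetical, standing in for such a flag bit:

#include <stdio.h>

#define BCH_WATERMARK_BITS	3
#define BCH_WATERMARK_MASK	~(~0U << BCH_WATERMARK_BITS)

enum bch_watermark {
	BCH_WATERMARK_stripe,		/* 0: least strict */
	BCH_WATERMARK_normal,
	BCH_WATERMARK_copygc,
	BCH_WATERMARK_btree,
	BCH_WATERMARK_btree_copygc,
	BCH_WATERMARK_reclaim,		/* 5: most strict */
	BCH_WATERMARK_NR,
};

/* hypothetical flag bit sitting above the watermark field */
#define EXAMPLE_FLAG	(1U << BCH_WATERMARK_BITS)

int main(void)
{
	unsigned flags = BCH_WATERMARK_copygc|EXAMPLE_FLAG;

	/* masking off the flag bits recovers the watermark */
	printf("watermark = %u\n", flags & BCH_WATERMARK_MASK);	/* prints 2 */
	return 0;
}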
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|
(ck->journal.seq == journal_last_seq(j)
- ? JOURNAL_WATERMARK_reserved
+ ? BCH_WATERMARK_reclaim
: 0)|
commit_flags);
struct bkey_i *, u64);
enum btree_insert_flags {
- /* First two bits for journal watermark: */
- __BTREE_INSERT_NOFAIL = 2,
+ /* First bits for bch_watermark: */
+ __BTREE_INSERT_NOFAIL = BCH_WATERMARK_BITS,
__BTREE_INSERT_NOCHECK_RW,
__BTREE_INSERT_LAZY_RW,
__BTREE_INSERT_USE_RESERVE,
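For context, a minimal sketch of why this starting offset matters, assuming the BTREE_INSERT_* macros are shifts of these __BTREE_INSERT_* bit numbers (the usual convention for this naming) and using the watermark values from the reordered enum above: every real flag lands at bit 3 or higher, leaving bits 0-2 for the watermark, which is why combinations like BTREE_INSERT_NOFAIL|BCH_WATERMARK_reclaim elsewhere in this patch don't collide.

#include <assert.h>

#define BCH_WATERMARK_BITS	3
#define BCH_WATERMARK_MASK	~(~0U << BCH_WATERMARK_BITS)
#define BCH_WATERMARK_reclaim	5	/* value from the reordered enum above */

enum {
	__BTREE_INSERT_NOFAIL = BCH_WATERMARK_BITS,	/* bit 3 */
	__BTREE_INSERT_NOCHECK_RW,			/* bit 4 */
};

#define BTREE_INSERT_NOFAIL	(1U << __BTREE_INSERT_NOFAIL)

int main(void)
{
	unsigned flags = BTREE_INSERT_NOFAIL|BCH_WATERMARK_reclaim;

	/* watermark and flag bits occupy disjoint ranges of the same word */
	assert((flags & BCH_WATERMARK_MASK) == BCH_WATERMARK_reclaim);
	assert(flags & BTREE_INSERT_NOFAIL);
	return 0;
}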
BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_USE_RESERVE|
BTREE_INSERT_JOURNAL_RECLAIM|
- JOURNAL_WATERMARK_reserved,
+ BCH_WATERMARK_reclaim,
btree_update_nodes_written_trans(&trans, as));
bch2_trans_unlock(&trans);
? BCH_DISK_RESERVATION_NOFAIL : 0;
unsigned nr_nodes[2] = { 0, 0 };
unsigned update_level = level;
- int journal_flags = flags & JOURNAL_WATERMARK_MASK;
+ int journal_flags = flags & BCH_WATERMARK_MASK;
int ret = 0;
u32 restart_count = trans->restart_count;
BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_USE_RESERVE|
BTREE_INSERT_JOURNAL_RECLAIM|
- JOURNAL_WATERMARK_reserved);
+ BCH_WATERMARK_reclaim);
if (ret)
goto err;
bch2_journal_preres_get(&trans->c->journal,
&trans->journal_preres,
trans->journal_preres_u64s,
- (flags & JOURNAL_WATERMARK_MASK)));
+ (flags & BCH_WATERMARK_MASK)));
}
static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
*/
if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
ret = bch2_trans_journal_res_get(trans,
- (flags & JOURNAL_WATERMARK_MASK)|
+ (flags & BCH_WATERMARK_MASK)|
JOURNAL_RES_GET_NONBLOCK);
if (ret)
return ret;
ret = bch2_journal_preres_get(&c->journal,
&trans->journal_preres, trans->journal_preres_u64s,
- (flags & JOURNAL_WATERMARK_MASK)|JOURNAL_RES_GET_NONBLOCK);
+ (flags & BCH_WATERMARK_MASK)|JOURNAL_RES_GET_NONBLOCK);
if (unlikely(ret == -BCH_ERR_journal_preres_get_blocked))
ret = bch2_trans_journal_preres_get_cold(trans, flags, trace_ip);
if (unlikely(ret))
break;
case -BCH_ERR_journal_res_get_blocked:
if ((flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
- !(flags & JOURNAL_WATERMARK_reserved)) {
+ (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim) {
ret = -BCH_ERR_journal_reclaim_would_deadlock;
break;
}
ret = drop_locks_do(trans,
bch2_trans_journal_res_get(trans,
- (flags & JOURNAL_WATERMARK_MASK)|
+ (flags & BCH_WATERMARK_MASK)|
JOURNAL_RES_GET_CHECK));
break;
case -BCH_ERR_btree_insert_need_journal_reclaim:
int ret;
va_start(args, fmt);
- ret = __bch2_fs_log_msg(c, JOURNAL_WATERMARK_reserved, fmt, args);
+ ret = __bch2_fs_log_msg(c, BCH_WATERMARK_reclaim, fmt, args);
va_end(args);
return ret;
}
commit_flags|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_JOURNAL_RECLAIM|
- JOURNAL_WATERMARK_reserved,
+ BCH_WATERMARK_reclaim,
__bch2_btree_insert(trans, i->btree, &i->k, 0));
if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
break;
#include "journal_seq_blacklist.h"
#include "trace.h"
-#define x(n) #n,
-static const char * const bch2_journal_watermarks[] = {
- JOURNAL_WATERMARKS()
- NULL
-};
-
static const char * const bch2_journal_errors[] = {
+#define x(n) #n,
JOURNAL_ERRORS()
+#undef x
NULL
};
-#undef x
static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
if (!(error == JOURNAL_ERR_journal_full ||
error == JOURNAL_ERR_journal_pin_full) ||
nr_unwritten_journal_entries(j) ||
- (flags & JOURNAL_WATERMARK_MASK) != JOURNAL_WATERMARK_reserved)
+ (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
return stuck;
spin_lock(&j->lock);
return 0;
}
- if ((flags & JOURNAL_WATERMARK_MASK) < j->watermark) {
+ if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
/*
* Don't want to close current journal entry, just need to
* invoke reclaim:
prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
prt_printf(out, "prereserved:\t\t%u/%u\n", j->prereserved.reserved, j->prereserved.remaining);
- prt_printf(out, "watermark:\t\t%s\n", bch2_journal_watermarks[j->watermark]);
+ prt_printf(out, "watermark:\t\t%s\n", bch2_watermarks[j->watermark]);
prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *,
unsigned);
-/* First two bits for JOURNAL_WATERMARK: */
-#define JOURNAL_RES_GET_NONBLOCK (1 << 2)
-#define JOURNAL_RES_GET_CHECK (1 << 3)
+/* First bits for BCH_WATERMARK: */
+enum journal_res_flags {
+ __JOURNAL_RES_GET_NONBLOCK = BCH_WATERMARK_BITS,
+ __JOURNAL_RES_GET_CHECK,
+};
+
+#define JOURNAL_RES_GET_NONBLOCK (1 << __JOURNAL_RES_GET_NONBLOCK)
+#define JOURNAL_RES_GET_CHECK (1 << __JOURNAL_RES_GET_CHECK)
static inline int journal_res_get_fast(struct journal *j,
struct journal_res *res,
EBUG_ON(!journal_state_count(new, new.idx));
- if ((flags & JOURNAL_WATERMARK_MASK) < j->watermark)
+ if ((flags & BCH_WATERMARK_MASK) < j->watermark)
return 0;
new.cur_entry_offset += res->u64s;
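A small worked example of the gating check above (a sketch using values from the reordered enum, not code from the patch): because the watermarks now run from least to most strict, journal_res_get_fast() can refuse any reservation whose watermark is numerically below j->watermark, so only callers at or above the journal's current watermark get through.

#include <assert.h>

#define BCH_WATERMARK_BITS	3
#define BCH_WATERMARK_MASK	~(~0U << BCH_WATERMARK_BITS)

/* values from the reordered BCH_WATERMARKS() above */
#define BCH_WATERMARK_normal	1
#define BCH_WATERMARK_copygc	2
#define BCH_WATERMARK_reclaim	5

#define JOURNAL_RES_GET_NONBLOCK	(1U << BCH_WATERMARK_BITS)	/* bit 3, per the new enum */

int main(void)
{
	unsigned j_watermark = BCH_WATERMARK_copygc;	/* journal under moderate pressure */
	unsigned ok  = BCH_WATERMARK_reclaim|JOURNAL_RES_GET_NONBLOCK;
	unsigned bad = BCH_WATERMARK_normal |JOURNAL_RES_GET_NONBLOCK;

	/* mirrors: if ((flags & BCH_WATERMARK_MASK) < j->watermark) return 0; */
	assert(!((ok  & BCH_WATERMARK_MASK) < j_watermark));	/* admitted */
	assert( ((bad & BCH_WATERMARK_MASK) < j_watermark));	/* refused  */
	return 0;
}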
static inline void journal_set_watermark(struct journal *j)
{
union journal_preres_state s = READ_ONCE(j->prereserved);
- unsigned watermark = JOURNAL_WATERMARK_any;
+ unsigned watermark = BCH_WATERMARK_stripe;
if (fifo_free(&j->pin) < j->pin.size / 4)
- watermark = max_t(unsigned, watermark, JOURNAL_WATERMARK_copygc);
+ watermark = max_t(unsigned, watermark, BCH_WATERMARK_copygc);
if (fifo_free(&j->pin) < j->pin.size / 8)
- watermark = max_t(unsigned, watermark, JOURNAL_WATERMARK_reserved);
+ watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim);
if (s.reserved > s.remaining)
- watermark = max_t(unsigned, watermark, JOURNAL_WATERMARK_copygc);
+ watermark = max_t(unsigned, watermark, BCH_WATERMARK_copygc);
if (!s.remaining)
- watermark = max_t(unsigned, watermark, JOURNAL_WATERMARK_reserved);
+ watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim);
if (watermark == j->watermark)
return;
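To make the escalation above concrete, a standalone sketch (the journal state values are hypothetical, and max_t here is a plain stand-in for the kernel macro): each pressure condition proposes a watermark, max_t keeps the strictest one, and once it reaches BCH_WATERMARK_reclaim only reclaim-level reservations are admitted by journal_res_get_fast().

#include <assert.h>

/* values from the reordered enum above */
#define BCH_WATERMARK_stripe	0
#define BCH_WATERMARK_copygc	2
#define BCH_WATERMARK_reclaim	5

#define max_t(t, a, b)	((t)(a) > (t)(b) ? (t)(a) : (t)(b))	/* stand-in for the kernel macro */

int main(void)
{
	/* hypothetical journal state: pin FIFO over 90% full, prereserved space exhausted */
	unsigned pin_free = 10, pin_size = 128;
	unsigned reserved = 4, remaining = 0;

	unsigned watermark = BCH_WATERMARK_stripe;

	if (pin_free < pin_size / 4)
		watermark = max_t(unsigned, watermark, BCH_WATERMARK_copygc);
	if (pin_free < pin_size / 8)
		watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim);
	if (reserved > remaining)
		watermark = max_t(unsigned, watermark, BCH_WATERMARK_copygc);
	if (!remaining)
		watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim);

	/* the strictest triggered condition wins */
	assert(watermark == BCH_WATERMARK_reclaim);
	return 0;
}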
int d = new_u64s - res->u64s;
union journal_preres_state old, new;
u64 v = atomic64_read(&j->prereserved.counter);
+ enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
int ret;
do {
old.v = new.v = v;
ret = 0;
- if ((flags & JOURNAL_WATERMARK_reserved) ||
+ if (watermark == BCH_WATERMARK_reclaim ||
new.reserved + d < new.remaining) {
new.reserved += d;
ret = 1;
* Must come before signaling write completion, for
* bch2_fs_journal_stop():
*/
- if (j->watermark)
+ if (j->watermark != BCH_WATERMARK_stripe)
journal_reclaim_kick(&c->journal);
/* also must come before signalling write completion: */
JOURNAL_NEED_FLUSH_WRITE,
};
-#define JOURNAL_WATERMARKS() \
- x(any) \
- x(copygc) \
- x(reserved)
-
-enum journal_watermark {
-#define x(n) JOURNAL_WATERMARK_##n,
- JOURNAL_WATERMARKS()
-#undef x
-};
-
-#define JOURNAL_WATERMARK_MASK 3
-
/* Reasons we may fail to get a journal reservation: */
#define JOURNAL_ERRORS() \
x(ok) \
struct {
union journal_res_state reservations;
- enum journal_watermark watermark;
+ enum bch_watermark watermark;
union journal_preres_state prereserved;
{
struct bch_fs *c = trans->c;
struct data_update_opts data_opts = {
- .btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
+ .btree_insert_flags = BTREE_INSERT_USE_RESERVE|BCH_WATERMARK_copygc,
};
move_buckets buckets = { 0 };
struct move_bucket_in_flight *f;
BTREE_INSERT_LAZY_RW|
BTREE_INSERT_NOFAIL|
(!k->allocated
- ? BTREE_INSERT_JOURNAL_REPLAY|JOURNAL_WATERMARK_reserved
+ ? BTREE_INSERT_JOURNAL_REPLAY|BCH_WATERMARK_reclaim
: 0),
bch2_journal_replay_key(&trans, k));
if (ret) {