/*
 * NOTE(review): this span is a unified-diff fragment — the leading
 * "+"/"-" markers below are patch markers, not C.  The function body is
 * also cut off: the `break` belongs to a wait loop whose head and tail
 * are outside this view.  Code left byte-identical; comments only.
 */
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
/* Snapshot of the GC counter, compared below to detect that GC ran. */
unsigned long gc_count = c->gc_count;
+ u64 available;
int ret = 0;
ca->allocator_state = ALLOCATOR_BLOCKED;
/* GC ran since gc_count was sampled: clear the stale gen-bump request. */
if (gc_count != c->gc_count)
ca->inc_gen_really_needs_gc = 0;
/*
 * Old condition (removed lines): stop waiting once the signed
 * difference between available buckets and buckets whose gens need
 * bumping covers the free space in free_inc.
 */
- if ((ssize_t) (dev_buckets_available(c, ca) -
- ca->inc_gen_really_needs_gc) >=
- (ssize_t) fifo_free(&ca->free_inc))
/*
 * New condition (added lines): clamp the difference to >= 0 first,
 * then additionally stop waiting when any bucket is available and the
 * RESERVE_BTREE freelist is not yet full — presumably so btree-node
 * allocations cannot be starved while waiting; TODO confirm against
 * the full allocator loop (not visible here).
 */
+ available = max_t(s64, 0, dev_buckets_available(c, ca) -
+ ca->inc_gen_really_needs_gc);
+
+ if (available > fifo_free(&ca->free_inc) ||
+ (available && !fifo_full(&ca->free[RESERVE_BTREE])))
break;
/* Drop gc_lock before blocking (presumably; the loop tail is not visible). */
up_read(&c->gc_lock);
/*
 * Worst-case number of btree nodes a single btree update may need:
 * presumably one node per level plus a new parent at each level above
 * for a full split cascade — TODO confirm against the btree update path.
 *
 * NOTE(review): stray unified-diff "-"/"+" markers removed from this
 * span; the patch's post-image (BTREE_RESERVE_MAX * 4) is retained.
 */
#define BTREE_RESERVE_MAX (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
/* Size of the freelist we allocate btree nodes from: */
#define BTREE_NODE_RESERVE (BTREE_RESERVE_MAX * 4)
/* Open buckets reserved for btree nodes, one per possible replica. */
#define BTREE_NODE_OPEN_BUCKET_RESERVE (BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)