 	unsigned long gc_count = c->gc_count;
 	int ret = 0;

+	ca->allocator_state = ALLOCATOR_BLOCKED;
+	closure_wake_up(&c->freelist_wait);
+
 	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (kthread_should_stop()) {

[...]

 	}

 	__set_current_state(TASK_RUNNING);
+	ca->allocator_state = ALLOCATOR_RUNNING;
+	closure_wake_up(&c->freelist_wait);
+
 	return ret;
 }
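
For reference: the hunks above and below replace the old allocator_blocked / allocator_blocked_full booleans with a single allocator_state field (presumably declared as enum allocator_states allocator_state in struct bch_dev). The enum itself is introduced elsewhere in the patch and is not quoted here; a minimal sketch of its assumed shape (the constant names are taken from the hunks, the ordering and comments are guesses):

/* assumed declaration, not part of the quoted hunks */
enum allocator_states {
	ALLOCATOR_STOPPED,		/* allocator thread not running */
	ALLOCATOR_RUNNING,		/* making normal progress */
	ALLOCATOR_BLOCKED,		/* waiting for buckets to become available */
	ALLOCATOR_BLOCKED_FULL,		/* free lists full, waiting for them to drain */
};
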
[...]

 				fifo_pop(&ca->free_inc, bucket);

 				closure_wake_up(&c->freelist_wait);
-				ca->allocator_blocked_full = false;
+				ca->allocator_state = ALLOCATOR_RUNNING;

 				spin_unlock(&c->freelist_lock);
 				goto out;
 			}

-		if (!ca->allocator_blocked_full) {
-			ca->allocator_blocked_full = true;
+		if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
+			ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
 			closure_wake_up(&c->freelist_wait);
 		}
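
In the hunk above, the transition to ALLOCATOR_BLOCKED_FULL only wakes freelist_wait when the state actually changes, so an allocator that keeps finding the free lists full does not issue a wakeup on every pass. A hypothetical helper, not part of the patch, shown only to make that guarded transition explicit:

/*
 * Hypothetical, for illustration only: wake waiters on freelist_wait
 * only when the allocator state actually changes.
 */
static inline void allocator_set_state(struct bch_fs *c, struct bch_dev *ca,
				       enum allocator_states new_state)
{
	if (ca->allocator_state != new_state) {
		ca->allocator_state = new_state;
		closure_wake_up(&c->freelist_wait);
	}
}
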
[...]

 	int ret;

 	set_freezable();
+	ca->allocator_state = ALLOCATOR_RUNNING;

 	while (1) {
 		cond_resched();

[...]

 			if (!nr ||
 			    (nr < ALLOC_SCAN_BATCH(ca) &&
 			     !fifo_full(&ca->free[RESERVE_MOVINGGC]))) {
-				ca->allocator_blocked = true;
-				closure_wake_up(&c->freelist_wait);
-
 				ret = wait_buckets_available(c, ca);
 				if (ret) {
 					up_read(&c->gc_lock);

[...]

 			}
 		} while (!nr);

-		ca->allocator_blocked = false;
 		up_read(&c->gc_lock);

 		pr_debug("%zu buckets to invalidate", nr);
[...]

 stop:
 	pr_debug("alloc thread stopping (ret %i)", ret);
+	ca->allocator_state = ALLOCATOR_STOPPED;
+	closure_wake_up(&c->freelist_wait);
 	return 0;
 }
[...]

 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
 {
 	if (ca->alloc_thread)
-		closure_wait_event(&c->freelist_wait, ca->allocator_blocked_full);
+		closure_wait_event(&c->freelist_wait,
+				   ca->allocator_state != ALLOCATOR_RUNNING);
 }
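
With the quiesce change above, waiters are released once the allocator is in any state other than ALLOCATOR_RUNNING (blocked, blocked-full, or stopped), where previously they waited specifically for the free lists to fill up. A single enum also makes the allocator's condition straightforward to report in debug output; a hypothetical lookup table, assuming the enum sketched earlier and not part of the quoted patch:

/* Hypothetical, for illustration only: */
static const char * const allocator_state_names[] = {
	[ALLOCATOR_STOPPED]		= "stopped",
	[ALLOCATOR_RUNNING]		= "running",
	[ALLOCATOR_BLOCKED]		= "blocked",
	[ALLOCATOR_BLOCKED_FULL]	= "blocked full",
};
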
 /* stop allocator thread: */