	b->c.level = level;
	b->c.btree_id = id;
+	if (level)
+		six_lock_pcpu_alloc(&b->c.lock);
+	else
+		six_lock_pcpu_free_rcu(&b->c.lock);
+
	mutex_lock(&bc->lock);
	ret = __bch2_btree_node_hash_insert(bc, b);
	if (!ret)
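Note on the helpers used above (not part of this patch): interior nodes, i.e. level > 0, get a per-cpu read counter attached to their six lock when they are hashed, while level 0 nodes drop any counter left over from a previous use of the struct. The _rcu variant is used on this path presumably because the node is becoming visible to RCU-protected cache lookups, so the counter may only be freed after a grace period. The allocation side amounts to roughly the following; the helper name comes from the six lock code, but the body here is only a sketch:

/* Sketch only: attach a per-cpu reader count to a six lock. */
void six_lock_pcpu_alloc(struct six_lock *lock)
{
	if (!lock->readers)
		lock->readers = alloc_percpu(unsigned);
}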
	while (!list_empty(&bc->freed)) {
		b = list_first_entry(&bc->freed, struct btree, list);
		list_del(&b->list);
+		six_lock_pcpu_free(&b->c.lock);
		kfree(b);
	}
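When a node finally comes off bc->freed and the struct btree is kfree()d, the per-cpu counter allocated above has to be released as well or it would leak. As a sketch, under the same assumptions as the previous snippet:

/* Sketch only: release the per-cpu reader count before the lock goes away. */
void six_lock_pcpu_free(struct six_lock *lock)
{
	free_percpu(lock->readers);
	lock->readers = NULL;
}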
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
-	atomic64_sub(__SIX_VAL(read_lock, readers),
-		     &b->c.lock.state.counter);
+	if (!b->c.lock.readers)
+		atomic64_sub(__SIX_VAL(read_lock, readers),
+			     &b->c.lock.state.counter);
+	else
+		this_cpu_sub(*b->c.lock.readers, readers);
+
	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
-	atomic64_add(__SIX_VAL(read_lock, readers),
-		     &b->c.lock.state.counter);
+
+	if (!b->c.lock.readers)
+		atomic64_add(__SIX_VAL(read_lock, readers),
+			     &b->c.lock.state.counter);
+	else
+		this_cpu_add(*b->c.lock.readers, readers);
}
bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
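Taking the write lock requires the reader count to reach zero. Before this change the function could subtract its own read locks straight from the lock's atomic state word and add them back afterwards; with per-cpu reader counts the state word no longer carries the read count for interior nodes, so the adjustment goes through this_cpu_sub()/this_cpu_add() instead, and the write-lock slowpath has to sum the per-cpu counters to decide whether any readers remain. The six lock code has a helper along these lines; this is only a sketch of the idea:

/* Sketch only: in per-cpu mode the total reader count is the sum
 * of the per-cpu counters rather than a field in lock->state. */
static unsigned pcpu_read_count(struct six_lock *lock)
{
	unsigned read_count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		read_count += *per_cpu_ptr(lock->readers, cpu);
	return read_count;
}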
	list_del_init(&b->list);
	mutex_unlock(&c->btree_cache.lock);
+	if (b->c.level)
+		six_lock_pcpu_alloc(&b->c.lock);
+	else
+		six_lock_pcpu_free(&b->c.lock);
+
	mutex_lock(&c->btree_root_lock);
	BUG_ON(btree_node_root(c, b) &&
	       (b->c.level < btree_node_root(c, b)->c.level ||