bcachefs: Simplify hash table checks
fs/bcachefs/btree_key_cache.c
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "trace.h"

#include <linux/sched/mm.h>

static struct kmem_cache *bch2_key_cache;

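/*
 * The key cache is an rhashtable indexed by (btree_id, pos): the compare
 * function matches on the full bkey_cached_key so a lookup only hits the
 * exact cached position.
 */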
static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
				       const void *obj)
{
	const struct bkey_cached *ck = obj;
	const struct bkey_cached_key *key = arg->key;

	return cmp_int(ck->key.btree_id, key->btree_id) ?:
		bpos_cmp(ck->key.pos, key->pos);
}

static const struct rhashtable_params bch2_btree_key_cache_params = {
	.head_offset	= offsetof(struct bkey_cached, hash),
	.key_offset	= offsetof(struct bkey_cached, key),
	.key_len	= sizeof(struct bkey_cached_key),
	.obj_cmpfn	= bch2_btree_key_cache_cmp_fn,
};

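/*
 * Look up a cached key by (btree_id, pos): returns the bkey_cached if it is
 * present in the hash table, NULL otherwise. Lockless lookup; the caller is
 * responsible for taking the entry's lock before using it.
 */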
__flatten
inline struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
{
	struct bkey_cached_key key = {
		.btree_id	= btree_id,
		.pos		= pos,
	};

	return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
				      bch2_btree_key_cache_params);
}

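/*
 * Try to take both intent and write locks on a cached key so it can be
 * evicted; fails without blocking if either lock is contended or the key is
 * still dirty.
 */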
static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
	if (!six_trylock_intent(&ck->c.lock))
		return false;

	if (!six_trylock_write(&ck->c.lock)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_write(&ck->c.lock);
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	return true;
}

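/*
 * Remove a cached key from the hash table, poison its key field and drop it
 * from the nr_keys accounting.
 */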
static void bkey_cached_evict(struct btree_key_cache *c,
			      struct bkey_cached *ck)
{
	BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
				      bch2_btree_key_cache_params));
	memset(&ck->key, ~0, sizeof(ck->key));

	atomic_long_dec(&c->nr_keys);
}

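/*
 * Move a clean entry onto the freed list (caller holds bc->lock): record the
 * SRCU barrier sequence so the entry isn't reused while btree transactions
 * may still reference it, free its key buffer, and drop its locks.
 */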
static void bkey_cached_free(struct btree_key_cache *bc,
			     struct bkey_cached *ck)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

	BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

	ck->btree_trans_barrier_seq =
		start_poll_synchronize_srcu(&c->btree_trans_barrier);

	list_move_tail(&ck->list, &bc->freed);
	bc->nr_freed++;

	kfree(ck->k);
	ck->k		= NULL;
	ck->u64s	= 0;

	six_unlock_write(&ck->c.lock);
	six_unlock_intent(&ck->c.lock);
}

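/*
 * Allocate a new bkey_cached; on success it is returned with both intent and
 * write locks held.
 */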
static struct bkey_cached *
bkey_cached_alloc(struct btree_key_cache *c)
{
	struct bkey_cached *ck;

	ck = kmem_cache_alloc(bch2_key_cache, GFP_NOFS|__GFP_ZERO);
	if (likely(ck)) {
		INIT_LIST_HEAD(&ck->list);
		six_lock_init(&ck->c.lock);
		lockdep_set_novalidate_class(&ck->c.lock);
		BUG_ON(!six_trylock_intent(&ck->c.lock));
		BUG_ON(!six_trylock_write(&ck->c.lock));
		return ck;
	}

	return NULL;
}

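/*
 * Allocation failed: try to steal an existing entry instead, first from the
 * freed list, then by evicting a clean key from the hash table.
 */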
static struct bkey_cached *
bkey_cached_reuse(struct btree_key_cache *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct bkey_cached *ck;
	unsigned i;

	mutex_lock(&c->lock);
	list_for_each_entry_reverse(ck, &c->freed, list)
		if (bkey_cached_lock_for_evict(ck)) {
			c->nr_freed--;
			list_del(&ck->list);
			mutex_unlock(&c->lock);
			return ck;
		}
	mutex_unlock(&c->lock);

	rcu_read_lock();
	tbl = rht_dereference_rcu(c->table.tbl, &c->table);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
			if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
			    bkey_cached_lock_for_evict(ck)) {
				bkey_cached_evict(c, ck);
				rcu_read_unlock();
				return ck;
			}
		}
	rcu_read_unlock();

	return NULL;
}

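/*
 * Allocate (or reuse) a bkey_cached for @pos and insert it into the hash
 * table. Returns ERR_PTR(-ENOMEM) on allocation failure, NULL if we lost a
 * race with another thread inserting the same key, otherwise the new entry
 * with the intent lock still held.
 */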
static struct bkey_cached *
btree_key_cache_create(struct btree_key_cache *c,
		       enum btree_id btree_id,
		       struct bpos pos)
{
	struct bkey_cached *ck;
	bool was_new = true;

	ck = bkey_cached_alloc(c);

	if (unlikely(!ck)) {
		ck = bkey_cached_reuse(c);
		if (unlikely(!ck))
			return ERR_PTR(-ENOMEM);

		was_new = false;
	}

	ck->c.level		= 0;
	ck->c.btree_id		= btree_id;
	ck->key.btree_id	= btree_id;
	ck->key.pos		= pos;
	ck->valid		= false;
	ck->flags		= 1U << BKEY_CACHED_ACCESSED;

	if (unlikely(rhashtable_lookup_insert_fast(&c->table,
					  &ck->hash,
					  bch2_btree_key_cache_params))) {
		/* We raced with another fill: */

		if (likely(was_new)) {
			six_unlock_write(&ck->c.lock);
			six_unlock_intent(&ck->c.lock);
			kfree(ck);
		} else {
			mutex_lock(&c->lock);
			bkey_cached_free(c, ck);
			mutex_unlock(&c->lock);
		}

		return NULL;
	}

	atomic_long_inc(&c->nr_keys);

	six_unlock_write(&ck->c.lock);

	return ck;
}

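/*
 * Read the current value at the cached position from the underlying btree
 * and copy it into the cached entry, reallocating the key buffer if the key
 * grew. The caller holds the cached key locked via @ck_iter.
 */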
static int btree_key_cache_fill(struct btree_trans *trans,
				struct btree_iter *ck_iter,
				struct bkey_cached *ck)
{
	struct btree_iter *iter;
	struct bkey_s_c k;
	unsigned new_u64s = 0;
	struct bkey_i *new_k = NULL;
	int ret;

	iter = bch2_trans_get_iter(trans, ck->key.btree_id,
				   ck->key.pos, BTREE_ITER_SLOTS);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!bch2_btree_node_relock(ck_iter, 0)) {
		trace_transaction_restart_ip(trans->ip, _THIS_IP_);
		ret = -EINTR;
		goto err;
	}

	if (k.k->u64s > ck->u64s) {
		new_u64s = roundup_pow_of_two(k.k->u64s);
		new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
		if (!new_k) {
			ret = -ENOMEM;
			goto err;
		}
	}

	bch2_btree_node_lock_write(ck_iter->l[0].b, ck_iter);
	if (new_k) {
		kfree(ck->k);
		ck->u64s = new_u64s;
		ck->k = new_k;
	}

	bkey_reassemble(ck->k, k);
	ck->valid = true;
	bch2_btree_node_unlock_write(ck_iter->l[0].b, ck_iter);

	/* We're not likely to need this iterator again: */
	set_btree_iter_dontneed(trans, iter);
err:
	bch2_trans_iter_put(trans, iter);
	return ret;
}

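/*
 * Check function passed to btree_node_lock() when locking a cached key:
 * verify the entry still refers to the btree/position the iterator wants,
 * since it may have been evicted and reused while we waited.
 */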
static int bkey_cached_check_fn(struct six_lock *lock, void *p)
{
	struct bkey_cached *ck = container_of(lock, struct bkey_cached, c.lock);
	const struct btree_iter *iter = p;

	return ck->key.btree_id == iter->btree_id &&
		!bpos_cmp(ck->key.pos, iter->pos) ? 0 : -1;
}

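/*
 * Traverse a BTREE_ITER_CACHED iterator: find (or create) the cached key for
 * the iterator's position, lock it, and fill it from the btree if it isn't
 * valid yet.
 */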
__flatten
int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck;
	int ret = 0;

	BUG_ON(iter->level);

	if (btree_node_locked(iter, 0)) {
		ck = (void *) iter->l[0].b;
		goto fill;
	}
retry:
	ck = bch2_btree_key_cache_find(c, iter->btree_id, iter->pos);
	if (!ck) {
		if (iter->flags & BTREE_ITER_CACHED_NOCREATE) {
			iter->l[0].b = NULL;
			return 0;
		}

		ck = btree_key_cache_create(&c->btree_key_cache,
					    iter->btree_id, iter->pos);
		ret = PTR_ERR_OR_ZERO(ck);
		if (ret)
			goto err;
		if (!ck)
			goto retry;

		mark_btree_node_locked(iter, 0, SIX_LOCK_intent);
		iter->locks_want = 1;
	} else {
		enum six_lock_type lock_want = __btree_lock_want(iter, 0);

		if (!btree_node_lock((void *) ck, iter->pos, 0, iter, lock_want,
				     bkey_cached_check_fn, iter, _THIS_IP_)) {
			if (ck->key.btree_id != iter->btree_id ||
			    bpos_cmp(ck->key.pos, iter->pos)) {
				goto retry;
			}

			trace_transaction_restart_ip(trans->ip, _THIS_IP_);
			ret = -EINTR;
			goto err;
		}

		if (ck->key.btree_id != iter->btree_id ||
		    bpos_cmp(ck->key.pos, iter->pos)) {
			six_unlock_type(&ck->c.lock, lock_want);
			goto retry;
		}

		mark_btree_node_locked(iter, 0, lock_want);
	}

	iter->l[0].lock_seq	= ck->c.lock.state.seq;
	iter->l[0].b		= (void *) ck;
fill:
	if (!ck->valid && !(iter->flags & BTREE_ITER_CACHED_NOFILL)) {
		if (!btree_node_intent_locked(iter, 0))
			bch2_btree_iter_upgrade(iter, 1);
		if (!btree_node_intent_locked(iter, 0)) {
			trace_transaction_restart_ip(trans->ip, _THIS_IP_);
			ret = -EINTR;
			goto err;
		}

		ret = btree_key_cache_fill(trans, iter, ck);
		if (ret)
			goto err;
	}

	if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
		set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

	iter->uptodate = BTREE_ITER_NEED_PEEK;

	if (!(iter->flags & BTREE_ITER_INTENT))
		bch2_btree_iter_downgrade(iter);
	else if (!iter->locks_want) {
		if (!__bch2_btree_iter_upgrade(iter, 1))
			ret = -EINTR;
	}

	return ret;
err:
	if (ret != -EINTR) {
		btree_node_unlock(iter, 0);
		iter->flags |= BTREE_ITER_ERROR;
		iter->l[0].b = BTREE_ITER_NO_NODE_ERROR;
	}
	return ret;
}

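/*
 * Flush a dirty cached key back to the btree, and optionally evict it from
 * the cache. If @journal_seq is nonzero, only flush if the cached key is
 * still pinning that journal sequence number.
 */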
static int btree_key_cache_flush_pos(struct btree_trans *trans,
				     struct bkey_cached_key key,
				     u64 journal_seq,
				     unsigned commit_flags,
				     bool evict)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_iter *c_iter = NULL, *b_iter = NULL;
	struct bkey_cached *ck = NULL;
	int ret;

	b_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
				     BTREE_ITER_SLOTS|
				     BTREE_ITER_INTENT);
	c_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
				     BTREE_ITER_CACHED|
				     BTREE_ITER_CACHED_NOFILL|
				     BTREE_ITER_CACHED_NOCREATE|
				     BTREE_ITER_INTENT);
retry:
	ret = bch2_btree_iter_traverse(c_iter);
	if (ret)
		goto err;

	ck = (void *) c_iter->l[0].b;
	if (!ck ||
	    (journal_seq && ck->journal.seq != journal_seq))
		goto out;

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		if (!evict)
			goto out;
		goto evict;
	}

	ret   = bch2_btree_iter_traverse(b_iter) ?:
		bch2_trans_update(trans, b_iter, ck->k, BTREE_TRIGGER_NORUN) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BTREE_INSERT_NOUNLOCK|
				  BTREE_INSERT_NOCHECK_RW|
				  BTREE_INSERT_NOFAIL|
				  (ck->journal.seq == journal_last_seq(j)
				   ? BTREE_INSERT_JOURNAL_RESERVED
				   : 0)|
				  commit_flags);
err:
	if (ret == -EINTR)
		goto retry;

	if (ret == -EAGAIN)
		goto out;

	if (ret) {
		bch2_fs_fatal_err_on(!bch2_journal_error(j), c,
			"error flushing key cache: %i", ret);
		goto out;
	}

	bch2_journal_pin_drop(j, &ck->journal);
	bch2_journal_preres_put(j, &ck->res);

	BUG_ON(!btree_node_locked(c_iter, 0));

	if (!evict) {
		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}
	} else {
evict:
		BUG_ON(!btree_node_intent_locked(c_iter, 0));

		mark_btree_node_unlocked(c_iter, 0);
		c_iter->l[0].b = NULL;

		six_lock_write(&ck->c.lock, NULL, NULL);

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}

		bkey_cached_evict(&c->btree_key_cache, ck);

		mutex_lock(&c->btree_key_cache.lock);
		bkey_cached_free(&c->btree_key_cache, ck);
		mutex_unlock(&c->btree_key_cache.lock);
	}
out:
	bch2_trans_iter_put(trans, b_iter);
	bch2_trans_iter_put(trans, c_iter);
	return ret;
}

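/*
 * Journal pin flush callback: called from journal reclaim when the pin held
 * by a dirty cached key needs to be released - flush that key back to the
 * btree so the pin can be dropped.
 */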
int bch2_btree_key_cache_journal_flush(struct journal *j,
				struct journal_entry_pin *pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bkey_cached *ck =
		container_of(pin, struct bkey_cached, journal);
	struct bkey_cached_key key;
	struct btree_trans trans;
	int ret = 0;

	int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);

	six_lock_read(&ck->c.lock, NULL, NULL);
	key = ck->key;

	if (ck->journal.seq != seq ||
	    !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}
	six_unlock_read(&ck->c.lock);

	bch2_trans_init(&trans, c, 0, 0);
	ret = btree_key_cache_flush_pos(&trans, key, seq,
				  BTREE_INSERT_JOURNAL_RECLAIM, false);
	bch2_trans_exit(&trans);
unlock:
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

	return ret;
}

/*
 * Flush and evict a key from the key cache:
 */
int bch2_btree_key_cache_flush(struct btree_trans *trans,
			       enum btree_id id, struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached_key key = { id, pos };

	/* Fastpath - assume it won't be found: */
	if (!bch2_btree_key_cache_find(c, id, pos))
		return 0;

	return btree_key_cache_flush_pos(trans, key, 0, 0, true);
}

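/*
 * Called from the transaction commit path to write an update into the key
 * cache instead of the btree: copy in the new value, transfer journal preres
 * from the transaction to the cached entry, mark it dirty and pin the journal
 * sequence it was written in.
 */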
bool bch2_btree_insert_key_cached(struct btree_trans *trans,
				  struct btree_iter *iter,
				  struct bkey_i *insert)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) iter->l[0].b;
	bool kick_reclaim = false;

	BUG_ON(insert->u64s > ck->u64s);

	if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		int difference;

		BUG_ON(jset_u64s(insert->u64s) > trans->journal_preres.u64s);

		difference = jset_u64s(insert->u64s) - ck->res.u64s;
		if (difference > 0) {
			trans->journal_preres.u64s	-= difference;
			ck->res.u64s			+= difference;
		}
	}

	bkey_copy(ck->k, insert);
	ck->valid = true;

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		set_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_inc(&c->btree_key_cache.nr_dirty);

		if (bch2_nr_btree_keys_need_flush(c))
			kick_reclaim = true;
	}

	bch2_journal_pin_update(&c->journal, trans->journal_res.seq,
				&ck->journal, bch2_btree_key_cache_journal_flush);

	if (kick_reclaim)
		journal_reclaim_kick(&c->journal);
	return true;
}

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_key_cache_verify_clean(struct btree_trans *trans,
			       enum btree_id id, struct bpos pos)
{
	BUG_ON(bch2_btree_key_cache_find(trans->c, id, pos));
}
#endif

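/*
 * Shrinker scan callback: first free entries on the freed list whose SRCU
 * grace period has elapsed, then walk the hash table evicting clean keys that
 * haven't been accessed since the previous scan (the accessed bit gives each
 * entry a second chance).
 */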
static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	struct bch_fs *c = container_of(shrink, struct bch_fs,
					btree_key_cache.shrink);
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bucket_table *tbl;
	struct bkey_cached *ck, *t;
	size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
	unsigned start, flags;
	int srcu_idx;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_FS)
		mutex_lock(&bc->lock);
	else if (!mutex_trylock(&bc->lock))
		return -1;

	srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	flags = memalloc_nofs_save();

	/*
	 * Newest freed entries are at the end of the list - once we hit one
	 * that's too new to be freed, we can bail out:
	 */
	list_for_each_entry_safe(ck, t, &bc->freed, list) {
		if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
						 ck->btree_trans_barrier_seq))
			break;

		list_del(&ck->list);
		kmem_cache_free(bch2_key_cache, ck);
		bc->nr_freed--;
		scanned++;
		freed++;
	}

	if (scanned >= nr)
		goto out;

	rcu_read_lock();
	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
	if (bc->shrink_iter >= tbl->size)
		bc->shrink_iter = 0;
	start = bc->shrink_iter;

	do {
		struct rhash_head *pos, *next;

		pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));

		while (!rht_is_a_nulls(pos)) {
			next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
			ck = container_of(pos, struct bkey_cached, hash);

			if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
				goto next;

			if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
				clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
			else if (bkey_cached_lock_for_evict(ck)) {
				bkey_cached_evict(bc, ck);
				bkey_cached_free(bc, ck);
			}

			scanned++;
			if (scanned >= nr)
				break;
next:
			pos = next;
		}

		bc->shrink_iter++;
		if (bc->shrink_iter >= tbl->size)
			bc->shrink_iter = 0;
	} while (scanned < nr && bc->shrink_iter != start);

	rcu_read_unlock();
out:
	memalloc_nofs_restore(flags);
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
	mutex_unlock(&bc->lock);

	return freed;
}

static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	struct bch_fs *c = container_of(shrink, struct bch_fs,
					btree_key_cache.shrink);
	struct btree_key_cache *bc = &c->btree_key_cache;

	return atomic_long_read(&bc->nr_keys);
}

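/*
 * Tear down the key cache at filesystem shutdown: evict and free every entry,
 * dropping any journal pins and preres, then destroy the hash table.
 */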
void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	struct bucket_table *tbl;
	struct bkey_cached *ck, *n;
	struct rhash_head *pos;
	unsigned i;

	if (bc->shrink.list.next)
		unregister_shrinker(&bc->shrink);

	mutex_lock(&bc->lock);

	rcu_read_lock();
	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
			bkey_cached_evict(bc, ck);
			list_add(&ck->list, &bc->freed);
		}
	rcu_read_unlock();

	list_for_each_entry_safe(ck, n, &bc->freed, list) {
		cond_resched();

		bch2_journal_pin_drop(&c->journal, &ck->journal);
		bch2_journal_preres_put(&c->journal, &ck->res);

		list_del(&ck->list);
		kfree(ck->k);
		kmem_cache_free(bch2_key_cache, ck);
	}

	BUG_ON(atomic_long_read(&bc->nr_dirty) && !bch2_journal_error(&c->journal));
	BUG_ON(atomic_long_read(&bc->nr_keys));

	mutex_unlock(&bc->lock);

	if (bc->table_init_done)
		rhashtable_destroy(&bc->table);
}

void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
{
	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->freed);
}

int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	int ret;

	ret = rhashtable_init(&bc->table, &bch2_btree_key_cache_params);
	if (ret)
		return ret;

	bc->table_init_done = true;

	bc->shrink.seeks		= 1;
	bc->shrink.count_objects	= bch2_btree_key_cache_count;
	bc->shrink.scan_objects		= bch2_btree_key_cache_scan;
	return register_shrinker(&bc->shrink, "%s/btree_key_cache", c->name);
}

void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
{
	pr_buf(out, "nr_freed:\t%zu\n", c->nr_freed);
	pr_buf(out, "nr_keys:\t%zu\n",	atomic_long_read(&c->nr_keys));
	pr_buf(out, "nr_dirty:\t%zu\n", atomic_long_read(&c->nr_dirty));
}

void bch2_btree_key_cache_exit(void)
{
	if (bch2_key_cache)
		kmem_cache_destroy(bch2_key_cache);
}

int __init bch2_btree_key_cache_init(void)
{
	bch2_key_cache = KMEM_CACHE(bkey_cached, 0);
	if (!bch2_key_cache)
		return -ENOMEM;

	return 0;
}