ec84ba5e93e57c1b477a160b85320a7ab786f464
[linux-block.git] / drivers / md / dm-bufio.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2009-2011 Red Hat, Inc.
4  *
5  * Author: Mikulas Patocka <mpatocka@redhat.com>
6  *
7  * This file is released under the GPL.
8  */
9
10 #include <linux/dm-bufio.h>
11
12 #include <linux/device-mapper.h>
13 #include <linux/dm-io.h>
14 #include <linux/slab.h>
15 #include <linux/sched/mm.h>
16 #include <linux/jiffies.h>
17 #include <linux/vmalloc.h>
18 #include <linux/shrinker.h>
19 #include <linux/module.h>
20 #include <linux/rbtree.h>
21 #include <linux/stacktrace.h>
22 #include <linux/jump_label.h>
23
24 #include "dm.h"
25
26 #define DM_MSG_PREFIX "bufio"
27
28 /*
29  * Memory management policy:
30  *      Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
31  *      or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
32  *      Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
33  *      Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
34  *      dirty buffers.
35  */
36 #define DM_BUFIO_MIN_BUFFERS            8
37
38 #define DM_BUFIO_MEMORY_PERCENT         2
39 #define DM_BUFIO_VMALLOC_PERCENT        25
40 #define DM_BUFIO_WRITEBACK_RATIO        3
41 #define DM_BUFIO_LOW_WATERMARK_RATIO    16
42
43 /*
44  * The nr of bytes of cached data to keep around.
45  */
46 #define DM_BUFIO_DEFAULT_RETAIN_BYTES   (256 * 1024)
47
48 /*
49  * Align buffer writes to this boundary.
50  * Tests show that SSDs have the highest IOPS when using 4k writes.
51  */
52 #define DM_BUFIO_WRITE_ALIGN            4096
53
54 /*
55  * dm_buffer->list_mode
56  */
57 #define LIST_CLEAN      0
58 #define LIST_DIRTY      1
59 #define LIST_SIZE       2
60
61 #define SCAN_RESCHED_CYCLE      16
62
63 /*--------------------------------------------------------------*/
64
65 /*
66  * Rather than use an LRU list, we use a clock algorithm where entries
67  * are held in a circular list.  When an entry is 'hit' a reference bit
68  * is set.  The least recently used entry is approximated by running a
69  * cursor around the list selecting unreferenced entries. Referenced
70  * entries have their reference bit cleared as the cursor passes them.
71  */
72 struct lru_entry {
73         struct list_head list;
74         atomic_t referenced;
75 };
76
77 struct lru_iter {
78         struct lru *lru;
79         struct list_head list;
80         struct lru_entry *stop;
81         struct lru_entry *e;
82 };
83
84 struct lru {
85         struct list_head *cursor;
86         unsigned long count;
87
88         struct list_head iterators;
89 };
90
91 /*--------------*/
92
93 static void lru_init(struct lru *lru)
94 {
95         lru->cursor = NULL;
96         lru->count = 0;
97         INIT_LIST_HEAD(&lru->iterators);
98 }
99
100 static void lru_destroy(struct lru *lru)
101 {
102         WARN_ON_ONCE(lru->cursor);
103         WARN_ON_ONCE(!list_empty(&lru->iterators));
104 }
105
106 /*
107  * Insert a new entry into the lru.
108  */
109 static void lru_insert(struct lru *lru, struct lru_entry *le)
110 {
111         /*
112          * Don't be tempted to set to 1, makes the lru aspect
113          * perform poorly.
114          */
115         atomic_set(&le->referenced, 0);
116
117         if (lru->cursor) {
118                 list_add_tail(&le->list, lru->cursor);
119         } else {
120                 INIT_LIST_HEAD(&le->list);
121                 lru->cursor = &le->list;
122         }
123         lru->count++;
124 }
125
126 /*--------------*/
127
128 /*
129  * Convert a list_head pointer to an lru_entry pointer.
130  */
131 static inline struct lru_entry *to_le(struct list_head *l)
132 {
133         return container_of(l, struct lru_entry, list);
134 }
135
136 /*
137  * Initialize an lru_iter and add it to the list of cursors in the lru.
138  */
139 static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
140 {
141         it->lru = lru;
142         it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
143         it->e = lru->cursor ? to_le(lru->cursor) : NULL;
144         list_add(&it->list, &lru->iterators);
145 }
146
147 /*
148  * Remove an lru_iter from the list of cursors in the lru.
149  */
150 static inline void lru_iter_end(struct lru_iter *it)
151 {
152         list_del(&it->list);
153 }
154
155 /* Predicate function type to be used with lru_iter_next */
156 typedef bool (*iter_predicate)(struct lru_entry *le, void *context);
157
158 /*
159  * Advance the cursor to the next entry that passes the
160  * predicate, and return that entry.  Returns NULL if the
161  * iteration is complete.
162  */
163 static struct lru_entry *lru_iter_next(struct lru_iter *it,
164                                        iter_predicate pred, void *context)
165 {
166         struct lru_entry *e;
167
168         while (it->e) {
169                 e = it->e;
170
171                 /* advance the cursor */
172                 if (it->e == it->stop)
173                         it->e = NULL;
174                 else
175                         it->e = to_le(it->e->list.next);
176
177                 if (pred(e, context))
178                         return e;
179         }
180
181         return NULL;
182 }
183
184 /*
185  * Invalidate a specific lru_entry and update all cursors in
186  * the lru accordingly.
187  */
188 static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
189 {
190         struct lru_iter *it;
191
192         list_for_each_entry(it, &lru->iterators, list) {
193                 /* Move c->e forwards if necc. */
194                 if (it->e == e) {
195                         it->e = to_le(it->e->list.next);
196                         if (it->e == e)
197                                 it->e = NULL;
198                 }
199
200                 /* Move it->stop backwards if necc. */
201                 if (it->stop == e) {
202                         it->stop = to_le(it->stop->list.prev);
203                         if (it->stop == e)
204                                 it->stop = NULL;
205                 }
206         }
207 }
208
209 /*--------------*/
210
211 /*
212  * Remove a specific entry from the lru.
213  */
214 static void lru_remove(struct lru *lru, struct lru_entry *le)
215 {
216         lru_iter_invalidate(lru, le);
217         if (lru->count == 1) {
218                 lru->cursor = NULL;
219         } else {
220                 if (lru->cursor == &le->list)
221                         lru->cursor = lru->cursor->next;
222                 list_del(&le->list);
223         }
224         lru->count--;
225 }
226
227 /*
228  * Mark as referenced.
229  */
230 static inline void lru_reference(struct lru_entry *le)
231 {
232         atomic_set(&le->referenced, 1);
233 }
234
235 /*--------------*/
236
237 /*
238  * Remove the least recently used entry (approx), that passes the predicate.
239  * Returns NULL on failure.
240  */
241 enum evict_result {
242         ER_EVICT,
243         ER_DONT_EVICT,
244         ER_STOP, /* stop looking for something to evict */
245 };
246
247 typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
248
249 static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
250 {
251         unsigned long tested = 0;
252         struct list_head *h = lru->cursor;
253         struct lru_entry *le;
254
255         if (!h)
256                 return NULL;
257         /*
258          * In the worst case we have to loop around twice. Once to clear
259          * the reference flags, and then again to discover the predicate
260          * fails for all entries.
261          */
262         while (tested < lru->count) {
263                 le = container_of(h, struct lru_entry, list);
264
265                 if (atomic_read(&le->referenced)) {
266                         atomic_set(&le->referenced, 0);
267                 } else {
268                         tested++;
269                         switch (pred(le, context)) {
270                         case ER_EVICT:
271                                 /*
272                                  * Adjust the cursor, so we start the next
273                                  * search from here.
274                                  */
275                                 lru->cursor = le->list.next;
276                                 lru_remove(lru, le);
277                                 return le;
278
279                         case ER_DONT_EVICT:
280                                 break;
281
282                         case ER_STOP:
283                                 lru->cursor = le->list.next;
284                                 return NULL;
285                         }
286                 }
287
288                 h = h->next;
289
290                 if (!no_sleep)
291                         cond_resched();
292         }
293
294         return NULL;
295 }
296
297 /*--------------------------------------------------------------*/
298
299 /*
300  * Buffer state bits.
301  */
302 #define B_READING       0
303 #define B_WRITING       1
304 #define B_DIRTY         2
305
306 /*
307  * Describes how the block was allocated:
308  * kmem_cache_alloc(), __get_free_pages() or vmalloc().
309  * See the comment at alloc_buffer_data.
310  */
311 enum data_mode {
312         DATA_MODE_SLAB = 0,
313         DATA_MODE_KMALLOC = 1,
314         DATA_MODE_GET_FREE_PAGES = 2,
315         DATA_MODE_VMALLOC = 3,
316         DATA_MODE_LIMIT = 4
317 };
318
319 struct dm_buffer {
320         /* protected by the locks in dm_buffer_cache */
321         struct rb_node node;
322
323         /* immutable, so don't need protecting */
324         sector_t block;
325         void *data;
326         unsigned char data_mode;                /* DATA_MODE_* */
327
328         /*
329          * These two fields are used in isolation, so do not need
330          * a surrounding lock.
331          */
332         atomic_t hold_count;
333         unsigned long last_accessed;
334
335         /*
336          * Everything else is protected by the mutex in
337          * dm_bufio_client
338          */
339         unsigned long state;
340         struct lru_entry lru;
341         unsigned char list_mode;                /* LIST_* */
342         blk_status_t read_error;
343         blk_status_t write_error;
344         unsigned int dirty_start;
345         unsigned int dirty_end;
346         unsigned int write_start;
347         unsigned int write_end;
348         struct list_head write_list;
349         struct dm_bufio_client *c;
350         void (*end_io)(struct dm_buffer *b, blk_status_t bs);
351 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
352 #define MAX_STACK 10
353         unsigned int stack_len;
354         unsigned long stack_entries[MAX_STACK];
355 #endif
356 };
357
358 /*--------------------------------------------------------------*/
359
360 /*
361  * The buffer cache manages buffers, particularly:
362  *  - inc/dec of holder count
363  *  - setting the last_accessed field
364  *  - maintains clean/dirty state along with lru
365  *  - selecting buffers that match predicates
366  *
367  * It does *not* handle:
368  *  - allocation/freeing of buffers.
369  *  - IO
370  *  - Eviction or cache sizing.
371  *
372  * cache_get() and cache_put() are threadsafe, you do not need to
373  * protect these calls with a surrounding mutex.  All the other
374  * methods are not threadsafe; they do use locking primitives, but
375  * only enough to ensure get/put are threadsafe.
376  */
377
378 struct buffer_tree {
379         union {
380                 struct rw_semaphore lock;
381                 rwlock_t spinlock;
382         } u;
383         struct rb_root root;
384 } ____cacheline_aligned_in_smp;
385
386 struct dm_buffer_cache {
387         struct lru lru[LIST_SIZE];
388         /*
389          * We spread entries across multiple trees to reduce contention
390          * on the locks.
391          */
392         unsigned int num_locks;
393         bool no_sleep;
394         struct buffer_tree trees[];
395 };
396
397 static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
398
399 static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
400 {
401         return dm_hash_locks_index(block, num_locks);
402 }
403
404 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
405 {
406         if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
407                 read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
408         else
409                 down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
410 }
411
412 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
413 {
414         if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
415                 read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
416         else
417                 up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
418 }
419
420 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
421 {
422         if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
423                 write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
424         else
425                 down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
426 }
427
428 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
429 {
430         if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
431                 write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
432         else
433                 up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
434 }
435
436 /*
437  * Sometimes we want to repeatedly get and drop locks as part of an iteration.
438  * This struct helps avoid redundant drop and gets of the same lock.
439  */
440 struct lock_history {
441         struct dm_buffer_cache *cache;
442         bool write;
443         unsigned int previous;
444         unsigned int no_previous;
445 };
446
447 static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
448 {
449         lh->cache = cache;
450         lh->write = write;
451         lh->no_previous = cache->num_locks;
452         lh->previous = lh->no_previous;
453 }
454
455 static void __lh_lock(struct lock_history *lh, unsigned int index)
456 {
457         if (lh->write) {
458                 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
459                         write_lock_bh(&lh->cache->trees[index].u.spinlock);
460                 else
461                         down_write(&lh->cache->trees[index].u.lock);
462         } else {
463                 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
464                         read_lock_bh(&lh->cache->trees[index].u.spinlock);
465                 else
466                         down_read(&lh->cache->trees[index].u.lock);
467         }
468 }
469
470 static void __lh_unlock(struct lock_history *lh, unsigned int index)
471 {
472         if (lh->write) {
473                 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
474                         write_unlock_bh(&lh->cache->trees[index].u.spinlock);
475                 else
476                         up_write(&lh->cache->trees[index].u.lock);
477         } else {
478                 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
479                         read_unlock_bh(&lh->cache->trees[index].u.spinlock);
480                 else
481                         up_read(&lh->cache->trees[index].u.lock);
482         }
483 }
484
485 /*
486  * Make sure you call this since it will unlock the final lock.
487  */
488 static void lh_exit(struct lock_history *lh)
489 {
490         if (lh->previous != lh->no_previous) {
491                 __lh_unlock(lh, lh->previous);
492                 lh->previous = lh->no_previous;
493         }
494 }
495
496 /*
497  * Named 'next' because there is no corresponding
498  * 'up/unlock' call since it's done automatically.
499  */
500 static void lh_next(struct lock_history *lh, sector_t b)
501 {
502         unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
503
504         if (lh->previous != lh->no_previous) {
505                 if (lh->previous != index) {
506                         __lh_unlock(lh, lh->previous);
507                         __lh_lock(lh, index);
508                         lh->previous = index;
509                 }
510         } else {
511                 __lh_lock(lh, index);
512                 lh->previous = index;
513         }
514 }
515
516 static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
517 {
518         return container_of(le, struct dm_buffer, lru);
519 }
520
521 static struct dm_buffer *list_to_buffer(struct list_head *l)
522 {
523         struct lru_entry *le = list_entry(l, struct lru_entry, list);
524
525         return le_to_buffer(le);
526 }
527
528 static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
529 {
530         unsigned int i;
531
532         bc->num_locks = num_locks;
533         bc->no_sleep = no_sleep;
534
535         for (i = 0; i < bc->num_locks; i++) {
536                 if (no_sleep)
537                         rwlock_init(&bc->trees[i].u.spinlock);
538                 else
539                         init_rwsem(&bc->trees[i].u.lock);
540                 bc->trees[i].root = RB_ROOT;
541         }
542
543         lru_init(&bc->lru[LIST_CLEAN]);
544         lru_init(&bc->lru[LIST_DIRTY]);
545 }
546
547 static void cache_destroy(struct dm_buffer_cache *bc)
548 {
549         unsigned int i;
550
551         for (i = 0; i < bc->num_locks; i++)
552                 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
553
554         lru_destroy(&bc->lru[LIST_CLEAN]);
555         lru_destroy(&bc->lru[LIST_DIRTY]);
556 }
557
558 /*--------------*/
559
560 /*
561  * not threadsafe, or racey depending how you look at it
562  */
563 static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
564 {
565         return bc->lru[list_mode].count;
566 }
567
568 static inline unsigned long cache_total(struct dm_buffer_cache *bc)
569 {
570         return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
571 }
572
573 /*--------------*/
574
575 /*
576  * Gets a specific buffer, indexed by block.
577  * If the buffer is found then its holder count will be incremented and
578  * lru_reference will be called.
579  *
580  * threadsafe
581  */
582 static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
583 {
584         struct rb_node *n = root->rb_node;
585         struct dm_buffer *b;
586
587         while (n) {
588                 b = container_of(n, struct dm_buffer, node);
589
590                 if (b->block == block)
591                         return b;
592
593                 n = block < b->block ? n->rb_left : n->rb_right;
594         }
595
596         return NULL;
597 }
598
599 static void __cache_inc_buffer(struct dm_buffer *b)
600 {
601         atomic_inc(&b->hold_count);
602         WRITE_ONCE(b->last_accessed, jiffies);
603 }
604
605 static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
606 {
607         struct dm_buffer *b;
608
609         cache_read_lock(bc, block);
610         b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
611         if (b) {
612                 lru_reference(&b->lru);
613                 __cache_inc_buffer(b);
614         }
615         cache_read_unlock(bc, block);
616
617         return b;
618 }
619
620 /*--------------*/
621
622 /*
623  * Returns true if the hold count hits zero.
624  * threadsafe
625  */
626 static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
627 {
628         bool r;
629
630         cache_read_lock(bc, b->block);
631         BUG_ON(!atomic_read(&b->hold_count));
632         r = atomic_dec_and_test(&b->hold_count);
633         cache_read_unlock(bc, b->block);
634
635         return r;
636 }
637
638 /*--------------*/
639
640 typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);
641
642 /*
643  * Evicts a buffer based on a predicate.  The oldest buffer that
644  * matches the predicate will be selected.  In addition to the
645  * predicate the hold_count of the selected buffer will be zero.
646  */
647 struct evict_wrapper {
648         struct lock_history *lh;
649         b_predicate pred;
650         void *context;
651 };
652
653 /*
654  * Wraps the buffer predicate turning it into an lru predicate.  Adds
655  * extra test for hold_count.
656  */
657 static enum evict_result __evict_pred(struct lru_entry *le, void *context)
658 {
659         struct evict_wrapper *w = context;
660         struct dm_buffer *b = le_to_buffer(le);
661
662         lh_next(w->lh, b->block);
663
664         if (atomic_read(&b->hold_count))
665                 return ER_DONT_EVICT;
666
667         return w->pred(b, w->context);
668 }
669
670 static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
671                                        b_predicate pred, void *context,
672                                        struct lock_history *lh)
673 {
674         struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
675         struct lru_entry *le;
676         struct dm_buffer *b;
677
678         le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
679         if (!le)
680                 return NULL;
681
682         b = le_to_buffer(le);
683         /* __evict_pred will have locked the appropriate tree. */
684         rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
685
686         return b;
687 }
688
689 static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
690                                      b_predicate pred, void *context)
691 {
692         struct dm_buffer *b;
693         struct lock_history lh;
694
695         lh_init(&lh, bc, true);
696         b = __cache_evict(bc, list_mode, pred, context, &lh);
697         lh_exit(&lh);
698
699         return b;
700 }
701
702 /*--------------*/
703
704 /*
705  * Mark a buffer as clean or dirty. Not threadsafe.
706  */
707 static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
708 {
709         cache_write_lock(bc, b->block);
710         if (list_mode != b->list_mode) {
711                 lru_remove(&bc->lru[b->list_mode], &b->lru);
712                 b->list_mode = list_mode;
713                 lru_insert(&bc->lru[b->list_mode], &b->lru);
714         }
715         cache_write_unlock(bc, b->block);
716 }
717
718 /*--------------*/
719
720 /*
721  * Runs through the lru associated with 'old_mode', if the predicate matches then
722  * it moves them to 'new_mode'.  Not threadsafe.
723  */
724 static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
725                               b_predicate pred, void *context, struct lock_history *lh)
726 {
727         struct lru_entry *le;
728         struct dm_buffer *b;
729         struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
730
731         while (true) {
732                 le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
733                 if (!le)
734                         break;
735
736                 b = le_to_buffer(le);
737                 b->list_mode = new_mode;
738                 lru_insert(&bc->lru[b->list_mode], &b->lru);
739         }
740 }
741
742 static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
743                             b_predicate pred, void *context)
744 {
745         struct lock_history lh;
746
747         lh_init(&lh, bc, true);
748         __cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
749         lh_exit(&lh);
750 }
751
752 /*--------------*/
753
754 /*
755  * Iterates through all clean or dirty entries calling a function for each
756  * entry.  The callback may terminate the iteration early.  Not threadsafe.
757  */
758
759 /*
760  * Iterator functions should return one of these actions to indicate
761  * how the iteration should proceed.
762  */
763 enum it_action {
764         IT_NEXT,
765         IT_COMPLETE,
766 };
767
768 typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);
769
770 static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
771                             iter_fn fn, void *context, struct lock_history *lh)
772 {
773         struct lru *lru = &bc->lru[list_mode];
774         struct lru_entry *le, *first;
775
776         if (!lru->cursor)
777                 return;
778
779         first = le = to_le(lru->cursor);
780         do {
781                 struct dm_buffer *b = le_to_buffer(le);
782
783                 lh_next(lh, b->block);
784
785                 switch (fn(b, context)) {
786                 case IT_NEXT:
787                         break;
788
789                 case IT_COMPLETE:
790                         return;
791                 }
792                 cond_resched();
793
794                 le = to_le(le->list.next);
795         } while (le != first);
796 }
797
798 static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
799                           iter_fn fn, void *context)
800 {
801         struct lock_history lh;
802
803         lh_init(&lh, bc, false);
804         __cache_iterate(bc, list_mode, fn, context, &lh);
805         lh_exit(&lh);
806 }
807
808 /*--------------*/
809
810 /*
811  * Passes ownership of the buffer to the cache. Returns false if the
812  * buffer was already present (in which case ownership does not pass).
813  * eg, a race with another thread.
814  *
815  * Holder count should be 1 on insertion.
816  *
817  * Not threadsafe.
818  */
819 static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
820 {
821         struct rb_node **new = &root->rb_node, *parent = NULL;
822         struct dm_buffer *found;
823
824         while (*new) {
825                 found = container_of(*new, struct dm_buffer, node);
826
827                 if (found->block == b->block)
828                         return false;
829
830                 parent = *new;
831                 new = b->block < found->block ?
832                         &found->node.rb_left : &found->node.rb_right;
833         }
834
835         rb_link_node(&b->node, parent, new);
836         rb_insert_color(&b->node, root);
837
838         return true;
839 }
840
841 static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
842 {
843         bool r;
844
845         if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
846                 return false;
847
848         cache_write_lock(bc, b->block);
849         BUG_ON(atomic_read(&b->hold_count) != 1);
850         r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
851         if (r)
852                 lru_insert(&bc->lru[b->list_mode], &b->lru);
853         cache_write_unlock(bc, b->block);
854
855         return r;
856 }
857
858 /*--------------*/
859
860 /*
861  * Removes buffer from cache, ownership of the buffer passes back to the caller.
862  * Fails if the hold_count is not one (ie. the caller holds the only reference).
863  *
864  * Not threadsafe.
865  */
866 static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
867 {
868         bool r;
869
870         cache_write_lock(bc, b->block);
871
872         if (atomic_read(&b->hold_count) != 1) {
873                 r = false;
874         } else {
875                 r = true;
876                 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
877                 lru_remove(&bc->lru[b->list_mode], &b->lru);
878         }
879
880         cache_write_unlock(bc, b->block);
881
882         return r;
883 }
884
885 /*--------------*/
886
887 typedef void (*b_release)(struct dm_buffer *);
888
889 static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
890 {
891         struct rb_node *n = root->rb_node;
892         struct dm_buffer *b;
893         struct dm_buffer *best = NULL;
894
895         while (n) {
896                 b = container_of(n, struct dm_buffer, node);
897
898                 if (b->block == block)
899                         return b;
900
901                 if (block <= b->block) {
902                         n = n->rb_left;
903                         best = b;
904                 } else {
905                         n = n->rb_right;
906                 }
907         }
908
909         return best;
910 }
911
912 static void __remove_range(struct dm_buffer_cache *bc,
913                            struct rb_root *root,
914                            sector_t begin, sector_t end,
915                            b_predicate pred, b_release release)
916 {
917         struct dm_buffer *b;
918
919         while (true) {
920                 cond_resched();
921
922                 b = __find_next(root, begin);
923                 if (!b || (b->block >= end))
924                         break;
925
926                 begin = b->block + 1;
927
928                 if (atomic_read(&b->hold_count))
929                         continue;
930
931                 if (pred(b, NULL) == ER_EVICT) {
932                         rb_erase(&b->node, root);
933                         lru_remove(&bc->lru[b->list_mode], &b->lru);
934                         release(b);
935                 }
936         }
937 }
938
939 static void cache_remove_range(struct dm_buffer_cache *bc,
940                                sector_t begin, sector_t end,
941                                b_predicate pred, b_release release)
942 {
943         unsigned int i;
944
945         BUG_ON(bc->no_sleep);
946         for (i = 0; i < bc->num_locks; i++) {
947                 down_write(&bc->trees[i].u.lock);
948                 __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
949                 up_write(&bc->trees[i].u.lock);
950         }
951 }
952
953 /*----------------------------------------------------------------*/
954
955 /*
956  * Linking of buffers:
957  *      All buffers are linked to buffer_cache with their node field.
958  *
959  *      Clean buffers that are not being written (B_WRITING not set)
960  *      are linked to lru[LIST_CLEAN] with their lru_list field.
961  *
962  *      Dirty and clean buffers that are being written are linked to
963  *      lru[LIST_DIRTY] with their lru_list field. When the write
964  *      finishes, the buffer cannot be relinked immediately (because we
965  *      are in an interrupt context and relinking requires process
966  *      context), so some clean-not-writing buffers can be held on
967  *      dirty_lru too.  They are later added to lru in the process
968  *      context.
969  */
970 struct dm_bufio_client {
971         struct block_device *bdev;
972         unsigned int block_size;
973         s8 sectors_per_block_bits;
974
975         bool no_sleep;
976         struct mutex lock;
977         spinlock_t spinlock;
978
979         int async_write_error;
980
981         void (*alloc_callback)(struct dm_buffer *buf);
982         void (*write_callback)(struct dm_buffer *buf);
983         struct kmem_cache *slab_buffer;
984         struct kmem_cache *slab_cache;
985         struct dm_io_client *dm_io;
986
987         struct list_head reserved_buffers;
988         unsigned int need_reserved_buffers;
989
990         unsigned int minimum_buffers;
991
992         sector_t start;
993
994         struct shrinker *shrinker;
995         struct work_struct shrink_work;
996         atomic_long_t need_shrink;
997
998         wait_queue_head_t free_buffer_wait;
999
1000         struct list_head client_list;
1001
1002         /*
1003          * Used by global_cleanup to sort the clients list.
1004          */
1005         unsigned long oldest_buffer;
1006
1007         struct dm_buffer_cache cache; /* must be last member */
1008 };
1009
1010 /*----------------------------------------------------------------*/
1011
1012 #define dm_bufio_in_request()   (!!current->bio_list)
1013
1014 static void dm_bufio_lock(struct dm_bufio_client *c)
1015 {
1016         if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1017                 spin_lock_bh(&c->spinlock);
1018         else
1019                 mutex_lock_nested(&c->lock, dm_bufio_in_request());
1020 }
1021
1022 static void dm_bufio_unlock(struct dm_bufio_client *c)
1023 {
1024         if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1025                 spin_unlock_bh(&c->spinlock);
1026         else
1027                 mutex_unlock(&c->lock);
1028 }
1029
1030 /*----------------------------------------------------------------*/
1031
1032 /*
1033  * Default cache size: available memory divided by the ratio.
1034  */
1035 static unsigned long dm_bufio_default_cache_size;
1036
1037 /*
1038  * Total cache size set by the user.
1039  */
1040 static unsigned long dm_bufio_cache_size;
1041
1042 /*
1043  * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
1044  * at any time.  If it disagrees, the user has changed cache size.
1045  */
1046 static unsigned long dm_bufio_cache_size_latch;
1047
1048 static DEFINE_SPINLOCK(global_spinlock);
1049
1050 static unsigned int dm_bufio_max_age; /* No longer does anything */
1051
1052 static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
1053
1054 static unsigned long dm_bufio_peak_allocated;
1055 static unsigned long dm_bufio_allocated_kmem_cache;
1056 static unsigned long dm_bufio_allocated_kmalloc;
1057 static unsigned long dm_bufio_allocated_get_free_pages;
1058 static unsigned long dm_bufio_allocated_vmalloc;
1059 static unsigned long dm_bufio_current_allocated;
1060
1061 /*----------------------------------------------------------------*/
1062
1063 /*
1064  * The current number of clients.
1065  */
1066 static int dm_bufio_client_count;
1067
1068 /*
1069  * The list of all clients.
1070  */
1071 static LIST_HEAD(dm_bufio_all_clients);
1072
1073 /*
1074  * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
1075  */
1076 static DEFINE_MUTEX(dm_bufio_clients_lock);
1077
1078 static struct workqueue_struct *dm_bufio_wq;
1079 static struct work_struct dm_bufio_replacement_work;
1080
1081
1082 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1083 static void buffer_record_stack(struct dm_buffer *b)
1084 {
1085         b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
1086 }
1087 #endif
1088
1089 /*----------------------------------------------------------------*/
1090
1091 static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
1092 {
1093         unsigned char data_mode;
1094         long diff;
1095
1096         static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
1097                 &dm_bufio_allocated_kmem_cache,
1098                 &dm_bufio_allocated_kmalloc,
1099                 &dm_bufio_allocated_get_free_pages,
1100                 &dm_bufio_allocated_vmalloc,
1101         };
1102
1103         data_mode = b->data_mode;
1104         diff = (long)b->c->block_size;
1105         if (unlink)
1106                 diff = -diff;
1107
1108         spin_lock(&global_spinlock);
1109
1110         *class_ptr[data_mode] += diff;
1111
1112         dm_bufio_current_allocated += diff;
1113
1114         if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
1115                 dm_bufio_peak_allocated = dm_bufio_current_allocated;
1116
1117         if (!unlink) {
1118                 if (dm_bufio_current_allocated > dm_bufio_cache_size)
1119                         queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
1120         }
1121
1122         spin_unlock(&global_spinlock);
1123 }
1124
1125 /*
1126  * Change the number of clients and recalculate per-client limit.
1127  */
1128 static void __cache_size_refresh(void)
1129 {
1130         if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
1131                 return;
1132         if (WARN_ON(dm_bufio_client_count < 0))
1133                 return;
1134
1135         dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
1136
1137         /*
1138          * Use default if set to 0 and report the actual cache size used.
1139          */
1140         if (!dm_bufio_cache_size_latch) {
1141                 (void)cmpxchg(&dm_bufio_cache_size, 0,
1142                               dm_bufio_default_cache_size);
1143                 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
1144         }
1145 }
1146
1147 /*
1148  * Allocating buffer data.
1149  *
1150  * Small buffers are allocated with kmem_cache, to use space optimally.
1151  *
1152  * For large buffers, we choose between get_free_pages and vmalloc.
1153  * Each has advantages and disadvantages.
1154  *
1155  * __get_free_pages can randomly fail if the memory is fragmented.
1156  * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
1157  * as low as 128M) so using it for caching is not appropriate.
1158  *
1159  * If the allocation may fail we use __get_free_pages. Memory fragmentation
1160  * won't have a fatal effect here, but it just causes flushes of some other
1161  * buffers and more I/O will be performed. Don't use __get_free_pages if it
1162  * always fails (i.e. order > MAX_PAGE_ORDER).
1163  *
1164  * If the allocation shouldn't fail we use __vmalloc. This is only for the
1165  * initial reserve allocation, so there's no risk of wasting all vmalloc
1166  * space.
1167  */
1168 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1169                                unsigned char *data_mode)
1170 {
1171         if (unlikely(c->slab_cache != NULL)) {
1172                 *data_mode = DATA_MODE_SLAB;
1173                 return kmem_cache_alloc(c->slab_cache, gfp_mask);
1174         }
1175
1176         if (unlikely(c->block_size < PAGE_SIZE)) {
1177                 *data_mode = DATA_MODE_KMALLOC;
1178                 return kmalloc(c->block_size, gfp_mask | __GFP_RECLAIMABLE);
1179         }
1180
1181         if (c->block_size <= KMALLOC_MAX_SIZE &&
1182             gfp_mask & __GFP_NORETRY) {
1183                 *data_mode = DATA_MODE_GET_FREE_PAGES;
1184                 return (void *)__get_free_pages(gfp_mask,
1185                                                 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1186         }
1187
1188         *data_mode = DATA_MODE_VMALLOC;
1189
1190         return __vmalloc(c->block_size, gfp_mask);
1191 }
1192
1193 /*
1194  * Free buffer's data.
1195  */
1196 static void free_buffer_data(struct dm_bufio_client *c,
1197                              void *data, unsigned char data_mode)
1198 {
1199         switch (data_mode) {
1200         case DATA_MODE_SLAB:
1201                 kmem_cache_free(c->slab_cache, data);
1202                 break;
1203
1204         case DATA_MODE_KMALLOC:
1205                 kfree(data);
1206                 break;
1207
1208         case DATA_MODE_GET_FREE_PAGES:
1209                 free_pages((unsigned long)data,
1210                            c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1211                 break;
1212
1213         case DATA_MODE_VMALLOC:
1214                 vfree(data);
1215                 break;
1216
1217         default:
1218                 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
1219                        data_mode);
1220                 BUG();
1221         }
1222 }
1223
1224 /*
1225  * Allocate buffer and its data.
1226  */
1227 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
1228 {
1229         struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
1230
1231         if (!b)
1232                 return NULL;
1233
1234         b->c = c;
1235
1236         b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
1237         if (!b->data) {
1238                 kmem_cache_free(c->slab_buffer, b);
1239                 return NULL;
1240         }
1241         adjust_total_allocated(b, false);
1242
1243 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1244         b->stack_len = 0;
1245 #endif
1246         return b;
1247 }
1248
1249 /*
1250  * Free buffer and its data.
1251  */
1252 static void free_buffer(struct dm_buffer *b)
1253 {
1254         struct dm_bufio_client *c = b->c;
1255
1256         adjust_total_allocated(b, true);
1257         free_buffer_data(c, b->data, b->data_mode);
1258         kmem_cache_free(c->slab_buffer, b);
1259 }
1260
1261 /*
1262  *--------------------------------------------------------------------------
1263  * Submit I/O on the buffer.
1264  *
1265  * Bio interface is faster but it has some problems:
1266  *      the vector list is limited (increasing this limit increases
1267  *      memory-consumption per buffer, so it is not viable);
1268  *
1269  *      the memory must be direct-mapped, not vmalloced;
1270  *
1271  * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
1272  * it is not vmalloced, try using the bio interface.
1273  *
1274  * If the buffer is big, if it is vmalloced or if the underlying device
1275  * rejects the bio because it is too large, use dm-io layer to do the I/O.
1276  * The dm-io layer splits the I/O into multiple requests, avoiding the above
1277  * shortcomings.
1278  *--------------------------------------------------------------------------
1279  */
1280
1281 /*
1282  * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
1283  * that the request was handled directly with bio interface.
1284  */
1285 static void dmio_complete(unsigned long error, void *context)
1286 {
1287         struct dm_buffer *b = context;
1288
1289         b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
1290 }
1291
1292 static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
1293                      unsigned int n_sectors, unsigned int offset,
1294                      unsigned short ioprio)
1295 {
1296         int r;
1297         struct dm_io_request io_req = {
1298                 .bi_opf = op,
1299                 .notify.fn = dmio_complete,
1300                 .notify.context = b,
1301                 .client = b->c->dm_io,
1302         };
1303         struct dm_io_region region = {
1304                 .bdev = b->c->bdev,
1305                 .sector = sector,
1306                 .count = n_sectors,
1307         };
1308
1309         if (b->data_mode != DATA_MODE_VMALLOC) {
1310                 io_req.mem.type = DM_IO_KMEM;
1311                 io_req.mem.ptr.addr = (char *)b->data + offset;
1312         } else {
1313                 io_req.mem.type = DM_IO_VMA;
1314                 io_req.mem.ptr.vma = (char *)b->data + offset;
1315         }
1316
1317         r = dm_io(&io_req, 1, &region, NULL, ioprio);
1318         if (unlikely(r))
1319                 b->end_io(b, errno_to_blk_status(r));
1320 }
1321
1322 static void bio_complete(struct bio *bio)
1323 {
1324         struct dm_buffer *b = bio->bi_private;
1325         blk_status_t status = bio->bi_status;
1326
1327         bio_uninit(bio);
1328         kfree(bio);
1329         b->end_io(b, status);
1330 }
1331
1332 static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
1333                     unsigned int n_sectors, unsigned int offset,
1334                     unsigned short ioprio)
1335 {
1336         struct bio *bio;
1337         char *ptr;
1338         unsigned int len;
1339
1340         bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
1341         if (!bio) {
1342                 use_dmio(b, op, sector, n_sectors, offset, ioprio);
1343                 return;
1344         }
1345         bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
1346         bio->bi_iter.bi_sector = sector;
1347         bio->bi_end_io = bio_complete;
1348         bio->bi_private = b;
1349         bio->bi_ioprio = ioprio;
1350
1351         ptr = (char *)b->data + offset;
1352         len = n_sectors << SECTOR_SHIFT;
1353
1354         bio_add_virt_nofail(bio, ptr, len);
1355
1356         submit_bio(bio);
1357 }
1358
1359 static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
1360 {
1361         sector_t sector;
1362
1363         if (likely(c->sectors_per_block_bits >= 0))
1364                 sector = block << c->sectors_per_block_bits;
1365         else
1366                 sector = block * (c->block_size >> SECTOR_SHIFT);
1367         sector += c->start;
1368
1369         return sector;
1370 }
1371
1372 static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
1373                       void (*end_io)(struct dm_buffer *, blk_status_t))
1374 {
1375         unsigned int n_sectors;
1376         sector_t sector;
1377         unsigned int offset, end;
1378
1379         b->end_io = end_io;
1380
1381         sector = block_to_sector(b->c, b->block);
1382
1383         if (op != REQ_OP_WRITE) {
1384                 n_sectors = b->c->block_size >> SECTOR_SHIFT;
1385                 offset = 0;
1386         } else {
1387                 if (b->c->write_callback)
1388                         b->c->write_callback(b);
1389                 offset = b->write_start;
1390                 end = b->write_end;
1391                 offset &= -DM_BUFIO_WRITE_ALIGN;
1392                 end += DM_BUFIO_WRITE_ALIGN - 1;
1393                 end &= -DM_BUFIO_WRITE_ALIGN;
1394                 if (unlikely(end > b->c->block_size))
1395                         end = b->c->block_size;
1396
1397                 sector += offset >> SECTOR_SHIFT;
1398                 n_sectors = (end - offset) >> SECTOR_SHIFT;
1399         }
1400
1401         if (b->data_mode != DATA_MODE_VMALLOC)
1402                 use_bio(b, op, sector, n_sectors, offset, ioprio);
1403         else
1404                 use_dmio(b, op, sector, n_sectors, offset, ioprio);
1405 }
1406
1407 /*
1408  *--------------------------------------------------------------
1409  * Writing dirty buffers
1410  *--------------------------------------------------------------
1411  */
1412
1413 /*
1414  * The endio routine for write.
1415  *
1416  * Set the error, clear B_WRITING bit and wake anyone who was waiting on
1417  * it.
1418  */
1419 static void write_endio(struct dm_buffer *b, blk_status_t status)
1420 {
1421         b->write_error = status;
1422         if (unlikely(status)) {
1423                 struct dm_bufio_client *c = b->c;
1424
1425                 (void)cmpxchg(&c->async_write_error, 0,
1426                                 blk_status_to_errno(status));
1427         }
1428
1429         BUG_ON(!test_bit(B_WRITING, &b->state));
1430
1431         smp_mb__before_atomic();
1432         clear_bit(B_WRITING, &b->state);
1433         smp_mb__after_atomic();
1434
1435         wake_up_bit(&b->state, B_WRITING);
1436 }
1437
1438 /*
1439  * Initiate a write on a dirty buffer, but don't wait for it.
1440  *
1441  * - If the buffer is not dirty, exit.
1442  * - If there some previous write going on, wait for it to finish (we can't
1443  *   have two writes on the same buffer simultaneously).
1444  * - Submit our write and don't wait on it. We set B_WRITING indicating
1445  *   that there is a write in progress.
1446  */
1447 static void __write_dirty_buffer(struct dm_buffer *b,
1448                                  struct list_head *write_list)
1449 {
1450         if (!test_bit(B_DIRTY, &b->state))
1451                 return;
1452
1453         clear_bit(B_DIRTY, &b->state);
1454         wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1455
1456         b->write_start = b->dirty_start;
1457         b->write_end = b->dirty_end;
1458
1459         if (!write_list)
1460                 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1461         else
1462                 list_add_tail(&b->write_list, write_list);
1463 }
1464
1465 static void __flush_write_list(struct list_head *write_list)
1466 {
1467         struct blk_plug plug;
1468
1469         blk_start_plug(&plug);
1470         while (!list_empty(write_list)) {
1471                 struct dm_buffer *b =
1472                         list_entry(write_list->next, struct dm_buffer, write_list);
1473                 list_del(&b->write_list);
1474                 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1475                 cond_resched();
1476         }
1477         blk_finish_plug(&plug);
1478 }
1479
1480 /*
1481  * Wait until any activity on the buffer finishes.  Possibly write the
1482  * buffer if it is dirty.  When this function finishes, there is no I/O
1483  * running on the buffer and the buffer is not dirty.
1484  */
1485 static void __make_buffer_clean(struct dm_buffer *b)
1486 {
1487         BUG_ON(atomic_read(&b->hold_count));
1488
1489         /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
1490         if (!smp_load_acquire(&b->state))       /* fast case */
1491                 return;
1492
1493         wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1494         __write_dirty_buffer(b, NULL);
1495         wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1496 }
1497
1498 static enum evict_result is_clean(struct dm_buffer *b, void *context)
1499 {
1500         struct dm_bufio_client *c = context;
1501
1502         /* These should never happen */
1503         if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
1504                 return ER_DONT_EVICT;
1505         if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
1506                 return ER_DONT_EVICT;
1507         if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
1508                 return ER_DONT_EVICT;
1509
1510         if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
1511             unlikely(test_bit(B_READING, &b->state)))
1512                 return ER_DONT_EVICT;
1513
1514         return ER_EVICT;
1515 }
1516
1517 static enum evict_result is_dirty(struct dm_buffer *b, void *context)
1518 {
1519         /* These should never happen */
1520         if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1521                 return ER_DONT_EVICT;
1522         if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
1523                 return ER_DONT_EVICT;
1524
1525         return ER_EVICT;
1526 }
1527
1528 /*
1529  * Find some buffer that is not held by anybody, clean it, unlink it and
1530  * return it.
1531  */
1532 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
1533 {
1534         struct dm_buffer *b;
1535
1536         b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
1537         if (b) {
1538                 /* this also waits for pending reads */
1539                 __make_buffer_clean(b);
1540                 return b;
1541         }
1542
1543         if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1544                 return NULL;
1545
1546         b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
1547         if (b) {
1548                 __make_buffer_clean(b);
1549                 return b;
1550         }
1551
1552         return NULL;
1553 }
1554
1555 /*
1556  * Wait until some other threads free some buffer or release hold count on
1557  * some buffer.
1558  *
1559  * This function is entered with c->lock held, drops it and regains it
1560  * before exiting.
1561  */
1562 static void __wait_for_free_buffer(struct dm_bufio_client *c)
1563 {
1564         DECLARE_WAITQUEUE(wait, current);
1565
1566         add_wait_queue(&c->free_buffer_wait, &wait);
1567         set_current_state(TASK_UNINTERRUPTIBLE);
1568         dm_bufio_unlock(c);
1569
1570         /*
1571          * It's possible to miss a wake up event since we don't always
1572          * hold c->lock when wake_up is called.  So we have a timeout here,
1573          * just in case.
1574          */
1575         io_schedule_timeout(5 * HZ);
1576
1577         remove_wait_queue(&c->free_buffer_wait, &wait);
1578
1579         dm_bufio_lock(c);
1580 }
1581
1582 enum new_flag {
1583         NF_FRESH = 0,
1584         NF_READ = 1,
1585         NF_GET = 2,
1586         NF_PREFETCH = 3
1587 };
1588
1589 /*
1590  * Allocate a new buffer. If the allocation is not possible, wait until
1591  * some other thread frees a buffer.
1592  *
1593  * May drop the lock and regain it.
1594  */
1595 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
1596 {
1597         struct dm_buffer *b;
1598         bool tried_noio_alloc = false;
1599
1600         /*
1601          * dm-bufio is resistant to allocation failures (it just keeps
1602          * one buffer reserved in cases all the allocations fail).
1603          * So set flags to not try too hard:
1604          *      GFP_NOWAIT: don't wait; if we need to sleep we'll release our
1605          *                  mutex and wait ourselves.
1606          *      __GFP_NORETRY: don't retry and rather return failure
1607          *      __GFP_NOMEMALLOC: don't use emergency reserves
1608          *      __GFP_NOWARN: don't print a warning in case of failure
1609          *
1610          * For debugging, if we set the cache size to 1, no new buffers will
1611          * be allocated.
1612          */
1613         while (1) {
1614                 if (dm_bufio_cache_size_latch != 1) {
1615                         b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1616                         if (b)
1617                                 return b;
1618                 }
1619
1620                 if (nf == NF_PREFETCH)
1621                         return NULL;
1622
1623                 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
1624                         dm_bufio_unlock(c);
1625                         b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1626                         dm_bufio_lock(c);
1627                         if (b)
1628                                 return b;
1629                         tried_noio_alloc = true;
1630                 }
1631
1632                 if (!list_empty(&c->reserved_buffers)) {
1633                         b = list_to_buffer(c->reserved_buffers.next);
1634                         list_del(&b->lru.list);
1635                         c->need_reserved_buffers++;
1636
1637                         return b;
1638                 }
1639
1640                 b = __get_unclaimed_buffer(c);
1641                 if (b)
1642                         return b;
1643
1644                 __wait_for_free_buffer(c);
1645         }
1646 }
1647
1648 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
1649 {
1650         struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
1651
1652         if (!b)
1653                 return NULL;
1654
1655         if (c->alloc_callback)
1656                 c->alloc_callback(b);
1657
1658         return b;
1659 }
1660
1661 /*
1662  * Free a buffer and wake other threads waiting for free buffers.
1663  */
1664 static void __free_buffer_wake(struct dm_buffer *b)
1665 {
1666         struct dm_bufio_client *c = b->c;
1667
1668         b->block = -1;
1669         if (!c->need_reserved_buffers)
1670                 free_buffer(b);
1671         else {
1672                 list_add(&b->lru.list, &c->reserved_buffers);
1673                 c->need_reserved_buffers--;
1674         }
1675
1676         /*
1677          * We hold the bufio lock here, so no one can add entries to the
1678          * wait queue anyway.
1679          */
1680         if (unlikely(waitqueue_active(&c->free_buffer_wait)))
1681                 wake_up(&c->free_buffer_wait);
1682 }
1683
1684 static enum evict_result cleaned(struct dm_buffer *b, void *context)
1685 {
1686         if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1687                 return ER_DONT_EVICT; /* should never happen */
1688
1689         if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
1690                 return ER_DONT_EVICT;
1691         else
1692                 return ER_EVICT;
1693 }
1694
1695 static void __move_clean_buffers(struct dm_bufio_client *c)
1696 {
1697         cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
1698 }
1699
1700 struct write_context {
1701         int no_wait;
1702         struct list_head *write_list;
1703 };
1704
1705 static enum it_action write_one(struct dm_buffer *b, void *context)
1706 {
1707         struct write_context *wc = context;
1708
1709         if (wc->no_wait && test_bit(B_WRITING, &b->state))
1710                 return IT_COMPLETE;
1711
1712         __write_dirty_buffer(b, wc->write_list);
1713         return IT_NEXT;
1714 }
1715
1716 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
1717                                         struct list_head *write_list)
1718 {
1719         struct write_context wc = {.no_wait = no_wait, .write_list = write_list};
1720
1721         __move_clean_buffers(c);
1722         cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
1723 }
1724
1725 /*
1726  * Check if we're over watermark.
1727  * If we are over threshold_buffers, start freeing buffers.
1728  * If we're over "limit_buffers", block until we get under the limit.
1729  */
1730 static void __check_watermark(struct dm_bufio_client *c,
1731                               struct list_head *write_list)
1732 {
1733         if (cache_count(&c->cache, LIST_DIRTY) >
1734             cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
1735                 __write_dirty_buffers_async(c, 1, write_list);
1736 }
1737
1738 /*
1739  *--------------------------------------------------------------
1740  * Getting a buffer
1741  *--------------------------------------------------------------
1742  */
1743
1744 static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
1745 {
1746         /*
1747          * Relying on waitqueue_active() is racey, but we sleep
1748          * with schedule_timeout anyway.
1749          */
1750         if (cache_put(&c->cache, b) &&
1751             unlikely(waitqueue_active(&c->free_buffer_wait)))
1752                 wake_up(&c->free_buffer_wait);
1753 }
1754
1755 /*
1756  * This assumes you have already checked the cache to see if the buffer
1757  * is already present (it will recheck after dropping the lock for allocation).
1758  */
1759 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1760                                      enum new_flag nf, int *need_submit,
1761                                      struct list_head *write_list)
1762 {
1763         struct dm_buffer *b, *new_b = NULL;
1764
1765         *need_submit = 0;
1766
1767         /* This can't be called with NF_GET */
1768         if (WARN_ON_ONCE(nf == NF_GET))
1769                 return NULL;
1770
1771         new_b = __alloc_buffer_wait(c, nf);
1772         if (!new_b)
1773                 return NULL;
1774
1775         /*
1776          * We've had a period where the mutex was unlocked, so need to
1777          * recheck the buffer tree.
1778          */
1779         b = cache_get(&c->cache, block);
1780         if (b) {
1781                 __free_buffer_wake(new_b);
1782                 goto found_buffer;
1783         }
1784
1785         __check_watermark(c, write_list);
1786
1787         b = new_b;
1788         atomic_set(&b->hold_count, 1);
1789         WRITE_ONCE(b->last_accessed, jiffies);
1790         b->block = block;
1791         b->read_error = 0;
1792         b->write_error = 0;
1793         b->list_mode = LIST_CLEAN;
1794
1795         if (nf == NF_FRESH)
1796                 b->state = 0;
1797         else {
1798                 b->state = 1 << B_READING;
1799                 *need_submit = 1;
1800         }
1801
1802         /*
1803          * We mustn't insert into the cache until the B_READING state
1804          * is set.  Otherwise another thread could get it and use
1805          * it before it had been read.
1806          */
1807         cache_insert(&c->cache, b);
1808
1809         return b;
1810
1811 found_buffer:
1812         if (nf == NF_PREFETCH) {
1813                 cache_put_and_wake(c, b);
1814                 return NULL;
1815         }
1816
1817         /*
1818          * Note: it is essential that we don't wait for the buffer to be
1819          * read if dm_bufio_get function is used. Both dm_bufio_get and
1820          * dm_bufio_prefetch can be used in the driver request routine.
1821          * If the user called both dm_bufio_prefetch and dm_bufio_get on
1822          * the same buffer, it would deadlock if we waited.
1823          */
1824         if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1825                 cache_put_and_wake(c, b);
1826                 return NULL;
1827         }
1828
1829         return b;
1830 }
1831
1832 /*
1833  * The endio routine for reading: set the error, clear the bit and wake up
1834  * anyone waiting on the buffer.
1835  */
1836 static void read_endio(struct dm_buffer *b, blk_status_t status)
1837 {
1838         b->read_error = status;
1839
1840         BUG_ON(!test_bit(B_READING, &b->state));
1841
1842         smp_mb__before_atomic();
1843         clear_bit(B_READING, &b->state);
1844         smp_mb__after_atomic();
1845
1846         wake_up_bit(&b->state, B_READING);
1847 }
1848
1849 /*
1850  * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
1851  * functions is similar except that dm_bufio_new doesn't read the
1852  * buffer from the disk (assuming that the caller overwrites all the data
1853  * and uses dm_bufio_mark_buffer_dirty to write new data back).
1854  */
1855 static void *new_read(struct dm_bufio_client *c, sector_t block,
1856                       enum new_flag nf, struct dm_buffer **bp,
1857                       unsigned short ioprio)
1858 {
1859         int need_submit = 0;
1860         struct dm_buffer *b;
1861
1862         LIST_HEAD(write_list);
1863
1864         *bp = NULL;
1865
1866         /*
1867          * Fast path, hopefully the block is already in the cache.  No need
1868          * to get the client lock for this.
1869          */
1870         b = cache_get(&c->cache, block);
1871         if (b) {
1872                 if (nf == NF_PREFETCH) {
1873                         cache_put_and_wake(c, b);
1874                         return NULL;
1875                 }
1876
1877                 /*
1878                  * Note: it is essential that we don't wait for the buffer to be
1879                  * read if dm_bufio_get function is used. Both dm_bufio_get and
1880                  * dm_bufio_prefetch can be used in the driver request routine.
1881                  * If the user called both dm_bufio_prefetch and dm_bufio_get on
1882                  * the same buffer, it would deadlock if we waited.
1883                  */
1884                 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1885                         cache_put_and_wake(c, b);
1886                         return NULL;
1887                 }
1888         }
1889
1890         if (!b) {
1891                 if (nf == NF_GET)
1892                         return NULL;
1893
1894                 dm_bufio_lock(c);
1895                 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1896                 dm_bufio_unlock(c);
1897         }
1898
1899 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1900         if (b && (atomic_read(&b->hold_count) == 1))
1901                 buffer_record_stack(b);
1902 #endif
1903
1904         __flush_write_list(&write_list);
1905
1906         if (!b)
1907                 return NULL;
1908
1909         if (need_submit)
1910                 submit_io(b, REQ_OP_READ, ioprio, read_endio);
1911
1912         if (nf != NF_GET)       /* we already tested this condition above */
1913                 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1914
1915         if (b->read_error) {
1916                 int error = blk_status_to_errno(b->read_error);
1917
1918                 dm_bufio_release(b);
1919
1920                 return ERR_PTR(error);
1921         }
1922
1923         *bp = b;
1924
1925         return b->data;
1926 }
1927
1928 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1929                    struct dm_buffer **bp)
1930 {
1931         return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
1932 }
1933 EXPORT_SYMBOL_GPL(dm_bufio_get);
1934
1935 static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1936                         struct dm_buffer **bp, unsigned short ioprio)
1937 {
1938         if (WARN_ON_ONCE(dm_bufio_in_request()))
1939                 return ERR_PTR(-EINVAL);
1940
1941         return new_read(c, block, NF_READ, bp, ioprio);
1942 }
1943
1944 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1945                     struct dm_buffer **bp)
1946 {
1947         return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT);
1948 }
1949 EXPORT_SYMBOL_GPL(dm_bufio_read);
1950
1951 void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
1952                                 struct dm_buffer **bp, unsigned short ioprio)
1953 {
1954         return __dm_bufio_read(c, block, bp, ioprio);
1955 }
1956 EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);
1957
1958 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1959                    struct dm_buffer **bp)
1960 {
1961         if (WARN_ON_ONCE(dm_bufio_in_request()))
1962                 return ERR_PTR(-EINVAL);
1963
1964         return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
1965 }
1966 EXPORT_SYMBOL_GPL(dm_bufio_new);
1967
1968 static void __dm_bufio_prefetch(struct dm_bufio_client *c,
1969                         sector_t block, unsigned int n_blocks,
1970                         unsigned short ioprio)
1971 {
1972         struct blk_plug plug;
1973
1974         LIST_HEAD(write_list);
1975
1976         if (WARN_ON_ONCE(dm_bufio_in_request()))
1977                 return; /* should never happen */
1978
1979         blk_start_plug(&plug);
1980
1981         for (; n_blocks--; block++) {
1982                 int need_submit;
1983                 struct dm_buffer *b;
1984
1985                 b = cache_get(&c->cache, block);
1986                 if (b) {
1987                         /* already in cache */
1988                         cache_put_and_wake(c, b);
1989                         continue;
1990                 }
1991
1992                 dm_bufio_lock(c);
1993                 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1994                                 &write_list);
1995                 if (unlikely(!list_empty(&write_list))) {
1996                         dm_bufio_unlock(c);
1997                         blk_finish_plug(&plug);
1998                         __flush_write_list(&write_list);
1999                         blk_start_plug(&plug);
2000                         dm_bufio_lock(c);
2001                 }
2002                 if (unlikely(b != NULL)) {
2003                         dm_bufio_unlock(c);
2004
2005                         if (need_submit)
2006                                 submit_io(b, REQ_OP_READ, ioprio, read_endio);
2007                         dm_bufio_release(b);
2008
2009                         cond_resched();
2010
2011                         if (!n_blocks)
2012                                 goto flush_plug;
2013                         dm_bufio_lock(c);
2014                 }
2015                 dm_bufio_unlock(c);
2016         }
2017
2018 flush_plug:
2019         blk_finish_plug(&plug);
2020 }
2021
2022 void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
2023 {
2024         return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT);
2025 }
2026 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
2027
2028 void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
2029                                 unsigned int n_blocks, unsigned short ioprio)
2030 {
2031         return __dm_bufio_prefetch(c, block, n_blocks, ioprio);
2032 }
2033 EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);
2034
2035 void dm_bufio_release(struct dm_buffer *b)
2036 {
2037         struct dm_bufio_client *c = b->c;
2038
2039         /*
2040          * If there were errors on the buffer, and the buffer is not
2041          * to be written, free the buffer. There is no point in caching
2042          * invalid buffer.
2043          */
2044         if ((b->read_error || b->write_error) &&
2045             !test_bit_acquire(B_READING, &b->state) &&
2046             !test_bit(B_WRITING, &b->state) &&
2047             !test_bit(B_DIRTY, &b->state)) {
2048                 dm_bufio_lock(c);
2049
2050                 /* cache remove can fail if there are other holders */
2051                 if (cache_remove(&c->cache, b)) {
2052                         __free_buffer_wake(b);
2053                         dm_bufio_unlock(c);
2054                         return;
2055                 }
2056
2057                 dm_bufio_unlock(c);
2058         }
2059
2060         cache_put_and_wake(c, b);
2061 }
2062 EXPORT_SYMBOL_GPL(dm_bufio_release);
2063
2064 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
2065                                         unsigned int start, unsigned int end)
2066 {
2067         struct dm_bufio_client *c = b->c;
2068
2069         BUG_ON(start >= end);
2070         BUG_ON(end > b->c->block_size);
2071
2072         dm_bufio_lock(c);
2073
2074         BUG_ON(test_bit(B_READING, &b->state));
2075
2076         if (!test_and_set_bit(B_DIRTY, &b->state)) {
2077                 b->dirty_start = start;
2078                 b->dirty_end = end;
2079                 cache_mark(&c->cache, b, LIST_DIRTY);
2080         } else {
2081                 if (start < b->dirty_start)
2082                         b->dirty_start = start;
2083                 if (end > b->dirty_end)
2084                         b->dirty_end = end;
2085         }
2086
2087         dm_bufio_unlock(c);
2088 }
2089 EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
2090
2091 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
2092 {
2093         dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
2094 }
2095 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
2096
2097 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
2098 {
2099         LIST_HEAD(write_list);
2100
2101         if (WARN_ON_ONCE(dm_bufio_in_request()))
2102                 return; /* should never happen */
2103
2104         dm_bufio_lock(c);
2105         __write_dirty_buffers_async(c, 0, &write_list);
2106         dm_bufio_unlock(c);
2107         __flush_write_list(&write_list);
2108 }
2109 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
2110
2111 /*
2112  * For performance, it is essential that the buffers are written asynchronously
2113  * and simultaneously (so that the block layer can merge the writes) and then
2114  * waited upon.
2115  *
2116  * Finally, we flush hardware disk cache.
2117  */
2118 static bool is_writing(struct lru_entry *e, void *context)
2119 {
2120         struct dm_buffer *b = le_to_buffer(e);
2121
2122         return test_bit(B_WRITING, &b->state);
2123 }
2124
2125 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
2126 {
2127         int a, f;
2128         unsigned long nr_buffers;
2129         struct lru_entry *e;
2130         struct lru_iter it;
2131
2132         LIST_HEAD(write_list);
2133
2134         dm_bufio_lock(c);
2135         __write_dirty_buffers_async(c, 0, &write_list);
2136         dm_bufio_unlock(c);
2137         __flush_write_list(&write_list);
2138         dm_bufio_lock(c);
2139
2140         nr_buffers = cache_count(&c->cache, LIST_DIRTY);
2141         lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
2142         while ((e = lru_iter_next(&it, is_writing, c))) {
2143                 struct dm_buffer *b = le_to_buffer(e);
2144                 __cache_inc_buffer(b);
2145
2146                 BUG_ON(test_bit(B_READING, &b->state));
2147
2148                 if (nr_buffers) {
2149                         nr_buffers--;
2150                         dm_bufio_unlock(c);
2151                         wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2152                         dm_bufio_lock(c);
2153                 } else {
2154                         wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2155                 }
2156
2157                 if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
2158                         cache_mark(&c->cache, b, LIST_CLEAN);
2159
2160                 cache_put_and_wake(c, b);
2161
2162                 cond_resched();
2163         }
2164         lru_iter_end(&it);
2165
2166         wake_up(&c->free_buffer_wait);
2167         dm_bufio_unlock(c);
2168
2169         a = xchg(&c->async_write_error, 0);
2170         f = dm_bufio_issue_flush(c);
2171         if (a)
2172                 return a;
2173
2174         return f;
2175 }
2176 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
2177
2178 /*
2179  * Use dm-io to send an empty barrier to flush the device.
2180  */
2181 int dm_bufio_issue_flush(struct dm_bufio_client *c)
2182 {
2183         struct dm_io_request io_req = {
2184                 .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
2185                 .mem.type = DM_IO_KMEM,
2186                 .mem.ptr.addr = NULL,
2187                 .client = c->dm_io,
2188         };
2189         struct dm_io_region io_reg = {
2190                 .bdev = c->bdev,
2191                 .sector = 0,
2192                 .count = 0,
2193         };
2194
2195         if (WARN_ON_ONCE(dm_bufio_in_request()))
2196                 return -EINVAL;
2197
2198         return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2199 }
2200 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
2201
2202 /*
2203  * Use dm-io to send a discard request to flush the device.
2204  */
2205 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
2206 {
2207         struct dm_io_request io_req = {
2208                 .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
2209                 .mem.type = DM_IO_KMEM,
2210                 .mem.ptr.addr = NULL,
2211                 .client = c->dm_io,
2212         };
2213         struct dm_io_region io_reg = {
2214                 .bdev = c->bdev,
2215                 .sector = block_to_sector(c, block),
2216                 .count = block_to_sector(c, count),
2217         };
2218
2219         if (WARN_ON_ONCE(dm_bufio_in_request()))
2220                 return -EINVAL; /* discards are optional */
2221
2222         return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2223 }
2224 EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
2225
2226 static void forget_buffer(struct dm_bufio_client *c, sector_t block)
2227 {
2228         struct dm_buffer *b;
2229
2230         b = cache_get(&c->cache, block);
2231         if (b) {
2232                 if (likely(!smp_load_acquire(&b->state))) {
2233                         if (cache_remove(&c->cache, b))
2234                                 __free_buffer_wake(b);
2235                         else
2236                                 cache_put_and_wake(c, b);
2237                 } else {
2238                         cache_put_and_wake(c, b);
2239                 }
2240         }
2241 }
2242
2243 /*
2244  * Free the given buffer.
2245  *
2246  * This is just a hint, if the buffer is in use or dirty, this function
2247  * does nothing.
2248  */
2249 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
2250 {
2251         dm_bufio_lock(c);
2252         forget_buffer(c, block);
2253         dm_bufio_unlock(c);
2254 }
2255 EXPORT_SYMBOL_GPL(dm_bufio_forget);
2256
2257 static enum evict_result idle(struct dm_buffer *b, void *context)
2258 {
2259         return b->state ? ER_DONT_EVICT : ER_EVICT;
2260 }
2261
2262 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
2263 {
2264         dm_bufio_lock(c);
2265         cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
2266         dm_bufio_unlock(c);
2267 }
2268 EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
2269
2270 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
2271 {
2272         c->minimum_buffers = n;
2273 }
2274 EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
2275
2276 unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
2277 {
2278         return c->block_size;
2279 }
2280 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
2281
2282 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
2283 {
2284         sector_t s = bdev_nr_sectors(c->bdev);
2285
2286         if (s >= c->start)
2287                 s -= c->start;
2288         else
2289                 s = 0;
2290         if (likely(c->sectors_per_block_bits >= 0))
2291                 s >>= c->sectors_per_block_bits;
2292         else
2293                 sector_div(s, c->block_size >> SECTOR_SHIFT);
2294         return s;
2295 }
2296 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
2297
2298 struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
2299 {
2300         return c->dm_io;
2301 }
2302 EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
2303
2304 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
2305 {
2306         return b->block;
2307 }
2308 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
2309
2310 void *dm_bufio_get_block_data(struct dm_buffer *b)
2311 {
2312         return b->data;
2313 }
2314 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
2315
2316 void *dm_bufio_get_aux_data(struct dm_buffer *b)
2317 {
2318         return b + 1;
2319 }
2320 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
2321
2322 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
2323 {
2324         return b->c;
2325 }
2326 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
2327
2328 static enum it_action warn_leak(struct dm_buffer *b, void *context)
2329 {
2330         bool *warned = context;
2331
2332         WARN_ON(!(*warned));
2333         *warned = true;
2334         DMERR("leaked buffer %llx, hold count %u, list %d",
2335               (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode);
2336 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2337         stack_trace_print(b->stack_entries, b->stack_len, 1);
2338         /* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
2339         atomic_set(&b->hold_count, 0);
2340 #endif
2341         return IT_NEXT;
2342 }
2343
2344 static void drop_buffers(struct dm_bufio_client *c)
2345 {
2346         int i;
2347         struct dm_buffer *b;
2348
2349         if (WARN_ON(dm_bufio_in_request()))
2350                 return; /* should never happen */
2351
2352         /*
2353          * An optimization so that the buffers are not written one-by-one.
2354          */
2355         dm_bufio_write_dirty_buffers_async(c);
2356
2357         dm_bufio_lock(c);
2358
2359         while ((b = __get_unclaimed_buffer(c)))
2360                 __free_buffer_wake(b);
2361
2362         for (i = 0; i < LIST_SIZE; i++) {
2363                 bool warned = false;
2364
2365                 cache_iterate(&c->cache, i, warn_leak, &warned);
2366         }
2367
2368 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2369         while ((b = __get_unclaimed_buffer(c)))
2370                 __free_buffer_wake(b);
2371 #endif
2372
2373         for (i = 0; i < LIST_SIZE; i++)
2374                 WARN_ON(cache_count(&c->cache, i));
2375
2376         dm_bufio_unlock(c);
2377 }
2378
2379 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
2380 {
2381         unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
2382
2383         if (likely(c->sectors_per_block_bits >= 0))
2384                 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
2385         else
2386                 retain_bytes /= c->block_size;
2387
2388         return retain_bytes;
2389 }
2390
2391 static void __scan(struct dm_bufio_client *c)
2392 {
2393         int l;
2394         struct dm_buffer *b;
2395         unsigned long freed = 0;
2396         unsigned long retain_target = get_retain_buffers(c);
2397         unsigned long count = cache_total(&c->cache);
2398
2399         for (l = 0; l < LIST_SIZE; l++) {
2400                 while (true) {
2401                         if (count - freed <= retain_target)
2402                                 atomic_long_set(&c->need_shrink, 0);
2403                         if (!atomic_long_read(&c->need_shrink))
2404                                 break;
2405
2406                         b = cache_evict(&c->cache, l,
2407                                         l == LIST_CLEAN ? is_clean : is_dirty, c);
2408                         if (!b)
2409                                 break;
2410
2411                         __make_buffer_clean(b);
2412                         __free_buffer_wake(b);
2413
2414                         atomic_long_dec(&c->need_shrink);
2415                         freed++;
2416
2417                         if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
2418                                 dm_bufio_unlock(c);
2419                                 cond_resched();
2420                                 dm_bufio_lock(c);
2421                         }
2422                 }
2423         }
2424 }
2425
2426 static void shrink_work(struct work_struct *w)
2427 {
2428         struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
2429
2430         dm_bufio_lock(c);
2431         __scan(c);
2432         dm_bufio_unlock(c);
2433 }
2434
2435 static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2436 {
2437         struct dm_bufio_client *c;
2438
2439         c = shrink->private_data;
2440         atomic_long_add(sc->nr_to_scan, &c->need_shrink);
2441         queue_work(dm_bufio_wq, &c->shrink_work);
2442
2443         return sc->nr_to_scan;
2444 }
2445
2446 static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
2447 {
2448         struct dm_bufio_client *c = shrink->private_data;
2449         unsigned long count = cache_total(&c->cache);
2450         unsigned long retain_target = get_retain_buffers(c);
2451         unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
2452
2453         if (unlikely(count < retain_target))
2454                 count = 0;
2455         else
2456                 count -= retain_target;
2457
2458         if (unlikely(count < queued_for_cleanup))
2459                 count = 0;
2460         else
2461                 count -= queued_for_cleanup;
2462
2463         return count;
2464 }
2465
2466 /*
2467  * Create the buffering interface
2468  */
2469 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
2470                                                unsigned int reserved_buffers, unsigned int aux_size,
2471                                                void (*alloc_callback)(struct dm_buffer *),
2472                                                void (*write_callback)(struct dm_buffer *),
2473                                                unsigned int flags)
2474 {
2475         int r;
2476         unsigned int num_locks;
2477         struct dm_bufio_client *c;
2478         char slab_name[64];
2479         static atomic_t seqno = ATOMIC_INIT(0);
2480
2481         if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
2482                 DMERR("%s: block size not specified or is not multiple of 512b", __func__);
2483                 r = -EINVAL;
2484                 goto bad_client;
2485         }
2486
2487         num_locks = dm_num_hash_locks();
2488         c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2489         if (!c) {
2490                 r = -ENOMEM;
2491                 goto bad_client;
2492         }
2493         cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
2494
2495         c->bdev = bdev;
2496         c->block_size = block_size;
2497         if (is_power_of_2(block_size))
2498                 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
2499         else
2500                 c->sectors_per_block_bits = -1;
2501
2502         c->alloc_callback = alloc_callback;
2503         c->write_callback = write_callback;
2504
2505         if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
2506                 c->no_sleep = true;
2507                 static_branch_inc(&no_sleep_enabled);
2508         }
2509
2510         mutex_init(&c->lock);
2511         spin_lock_init(&c->spinlock);
2512         INIT_LIST_HEAD(&c->reserved_buffers);
2513         c->need_reserved_buffers = reserved_buffers;
2514
2515         dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
2516
2517         init_waitqueue_head(&c->free_buffer_wait);
2518         c->async_write_error = 0;
2519
2520         c->dm_io = dm_io_client_create();
2521         if (IS_ERR(c->dm_io)) {
2522                 r = PTR_ERR(c->dm_io);
2523                 goto bad_dm_io;
2524         }
2525
2526         if (block_size <= KMALLOC_MAX_SIZE && !is_power_of_2(block_size)) {
2527                 unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
2528
2529                 snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u",
2530                                         block_size, atomic_inc_return(&seqno));
2531                 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
2532                                                   SLAB_RECLAIM_ACCOUNT, NULL);
2533                 if (!c->slab_cache) {
2534                         r = -ENOMEM;
2535                         goto bad;
2536                 }
2537         }
2538         if (aux_size)
2539                 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u",
2540                                         aux_size, atomic_inc_return(&seqno));
2541         else
2542                 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u",
2543                                         atomic_inc_return(&seqno));
2544         c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
2545                                            0, SLAB_RECLAIM_ACCOUNT, NULL);
2546         if (!c->slab_buffer) {
2547                 r = -ENOMEM;
2548                 goto bad;
2549         }
2550
2551         while (c->need_reserved_buffers) {
2552                 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
2553
2554                 if (!b) {
2555                         r = -ENOMEM;
2556                         goto bad;
2557                 }
2558                 __free_buffer_wake(b);
2559         }
2560
2561         INIT_WORK(&c->shrink_work, shrink_work);
2562         atomic_long_set(&c->need_shrink, 0);
2563
2564         c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
2565                                      MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2566         if (!c->shrinker) {
2567                 r = -ENOMEM;
2568                 goto bad;
2569         }
2570
2571         c->shrinker->count_objects = dm_bufio_shrink_count;
2572         c->shrinker->scan_objects = dm_bufio_shrink_scan;
2573         c->shrinker->seeks = 1;
2574         c->shrinker->batch = 0;
2575         c->shrinker->private_data = c;
2576
2577         shrinker_register(c->shrinker);
2578
2579         mutex_lock(&dm_bufio_clients_lock);
2580         dm_bufio_client_count++;
2581         list_add(&c->client_list, &dm_bufio_all_clients);
2582         __cache_size_refresh();
2583         mutex_unlock(&dm_bufio_clients_lock);
2584
2585         return c;
2586
2587 bad:
2588         while (!list_empty(&c->reserved_buffers)) {
2589                 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2590
2591                 list_del(&b->lru.list);
2592                 free_buffer(b);
2593         }
2594         kmem_cache_destroy(c->slab_cache);
2595         kmem_cache_destroy(c->slab_buffer);
2596         dm_io_client_destroy(c->dm_io);
2597 bad_dm_io:
2598         mutex_destroy(&c->lock);
2599         if (c->no_sleep)
2600                 static_branch_dec(&no_sleep_enabled);
2601         kfree(c);
2602 bad_client:
2603         return ERR_PTR(r);
2604 }
2605 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
2606
2607 /*
2608  * Free the buffering interface.
2609  * It is required that there are no references on any buffers.
2610  */
2611 void dm_bufio_client_destroy(struct dm_bufio_client *c)
2612 {
2613         unsigned int i;
2614
2615         drop_buffers(c);
2616
2617         shrinker_free(c->shrinker);
2618         flush_work(&c->shrink_work);
2619
2620         mutex_lock(&dm_bufio_clients_lock);
2621
2622         list_del(&c->client_list);
2623         dm_bufio_client_count--;
2624         __cache_size_refresh();
2625
2626         mutex_unlock(&dm_bufio_clients_lock);
2627
2628         WARN_ON(c->need_reserved_buffers);
2629
2630         while (!list_empty(&c->reserved_buffers)) {
2631                 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2632
2633                 list_del(&b->lru.list);
2634                 free_buffer(b);
2635         }
2636
2637         for (i = 0; i < LIST_SIZE; i++)
2638                 if (cache_count(&c->cache, i))
2639                         DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
2640
2641         for (i = 0; i < LIST_SIZE; i++)
2642                 WARN_ON(cache_count(&c->cache, i));
2643
2644         cache_destroy(&c->cache);
2645         kmem_cache_destroy(c->slab_cache);
2646         kmem_cache_destroy(c->slab_buffer);
2647         dm_io_client_destroy(c->dm_io);
2648         mutex_destroy(&c->lock);
2649         if (c->no_sleep)
2650                 static_branch_dec(&no_sleep_enabled);
2651         kfree(c);
2652 }
2653 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
2654
2655 void dm_bufio_client_reset(struct dm_bufio_client *c)
2656 {
2657         drop_buffers(c);
2658         flush_work(&c->shrink_work);
2659 }
2660 EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
2661
2662 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
2663 {
2664         c->start = start;
2665 }
2666 EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
2667
2668 /*--------------------------------------------------------------*/
2669
2670 /*
2671  * Global cleanup tries to evict the oldest buffers from across _all_
2672  * the clients.  It does this by repeatedly evicting a few buffers from
2673  * the client that holds the oldest buffer.  It's approximate, but hopefully
2674  * good enough.
2675  */
2676 static struct dm_bufio_client *__pop_client(void)
2677 {
2678         struct list_head *h;
2679
2680         if (list_empty(&dm_bufio_all_clients))
2681                 return NULL;
2682
2683         h = dm_bufio_all_clients.next;
2684         list_del(h);
2685         return container_of(h, struct dm_bufio_client, client_list);
2686 }
2687
2688 /*
2689  * Inserts the client in the global client list based on its
2690  * 'oldest_buffer' field.
2691  */
2692 static void __insert_client(struct dm_bufio_client *new_client)
2693 {
2694         struct dm_bufio_client *c;
2695         struct list_head *h = dm_bufio_all_clients.next;
2696
2697         while (h != &dm_bufio_all_clients) {
2698                 c = container_of(h, struct dm_bufio_client, client_list);
2699                 if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
2700                         break;
2701                 h = h->next;
2702         }
2703
2704         list_add_tail(&new_client->client_list, h);
2705 }
2706
2707 static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2708 {
2709         /* In no-sleep mode, we cannot wait on IO. */
2710         if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) {
2711                 if (test_bit_acquire(B_READING, &b->state) ||
2712                     test_bit(B_WRITING, &b->state) ||
2713                     test_bit(B_DIRTY, &b->state))
2714                         return ER_DONT_EVICT;
2715         }
2716         return ER_EVICT;
2717 }
2718
2719 static unsigned long __evict_a_few(unsigned long nr_buffers)
2720 {
2721         struct dm_bufio_client *c;
2722         unsigned long oldest_buffer = jiffies;
2723         unsigned long last_accessed;
2724         unsigned long count;
2725         struct dm_buffer *b;
2726
2727         c = __pop_client();
2728         if (!c)
2729                 return 0;
2730
2731         dm_bufio_lock(c);
2732
2733         for (count = 0; count < nr_buffers; count++) {
2734                 b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL);
2735                 if (!b)
2736                         break;
2737
2738                 last_accessed = READ_ONCE(b->last_accessed);
2739                 if (time_after_eq(oldest_buffer, last_accessed))
2740                         oldest_buffer = last_accessed;
2741
2742                 __make_buffer_clean(b);
2743                 __free_buffer_wake(b);
2744
2745                 cond_resched();
2746         }
2747
2748         dm_bufio_unlock(c);
2749
2750         if (count)
2751                 c->oldest_buffer = oldest_buffer;
2752         __insert_client(c);
2753
2754         return count;
2755 }
2756
2757 static void check_watermarks(void)
2758 {
2759         LIST_HEAD(write_list);
2760         struct dm_bufio_client *c;
2761
2762         mutex_lock(&dm_bufio_clients_lock);
2763         list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
2764                 dm_bufio_lock(c);
2765                 __check_watermark(c, &write_list);
2766                 dm_bufio_unlock(c);
2767         }
2768         mutex_unlock(&dm_bufio_clients_lock);
2769
2770         __flush_write_list(&write_list);
2771 }
2772
2773 static void evict_old(void)
2774 {
2775         unsigned long threshold = dm_bufio_cache_size -
2776                 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
2777
2778         mutex_lock(&dm_bufio_clients_lock);
2779         while (dm_bufio_current_allocated > threshold) {
2780                 if (!__evict_a_few(64))
2781                         break;
2782                 cond_resched();
2783         }
2784         mutex_unlock(&dm_bufio_clients_lock);
2785 }
2786
2787 static void do_global_cleanup(struct work_struct *w)
2788 {
2789         check_watermarks();
2790         evict_old();
2791 }
2792
2793 /*
2794  *--------------------------------------------------------------
2795  * Module setup
2796  *--------------------------------------------------------------
2797  */
2798
2799 /*
2800  * This is called only once for the whole dm_bufio module.
2801  * It initializes memory limit.
2802  */
2803 static int __init dm_bufio_init(void)
2804 {
2805         __u64 mem;
2806
2807         dm_bufio_allocated_kmem_cache = 0;
2808         dm_bufio_allocated_kmalloc = 0;
2809         dm_bufio_allocated_get_free_pages = 0;
2810         dm_bufio_allocated_vmalloc = 0;
2811         dm_bufio_current_allocated = 0;
2812
2813         mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2814                                DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2815
2816         if (mem > ULONG_MAX)
2817                 mem = ULONG_MAX;
2818
2819 #ifdef CONFIG_MMU
2820         if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2821                 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2822 #endif
2823
2824         dm_bufio_default_cache_size = mem;
2825
2826         mutex_lock(&dm_bufio_clients_lock);
2827         __cache_size_refresh();
2828         mutex_unlock(&dm_bufio_clients_lock);
2829
2830         dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2831         if (!dm_bufio_wq)
2832                 return -ENOMEM;
2833
2834         INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2835
2836         return 0;
2837 }
2838
2839 /*
2840  * This is called once when unloading the dm_bufio module.
2841  */
2842 static void __exit dm_bufio_exit(void)
2843 {
2844         int bug = 0;
2845
2846         destroy_workqueue(dm_bufio_wq);
2847
2848         if (dm_bufio_client_count) {
2849                 DMCRIT("%s: dm_bufio_client_count leaked: %d",
2850                         __func__, dm_bufio_client_count);
2851                 bug = 1;
2852         }
2853
2854         if (dm_bufio_current_allocated) {
2855                 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2856                         __func__, dm_bufio_current_allocated);
2857                 bug = 1;
2858         }
2859
2860         if (dm_bufio_allocated_get_free_pages) {
2861                 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2862                        __func__, dm_bufio_allocated_get_free_pages);
2863                 bug = 1;
2864         }
2865
2866         if (dm_bufio_allocated_vmalloc) {
2867                 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
2868                        __func__, dm_bufio_allocated_vmalloc);
2869                 bug = 1;
2870         }
2871
2872         WARN_ON(bug); /* leaks are not worth crashing the system */
2873 }
2874
2875 module_init(dm_bufio_init)
2876 module_exit(dm_bufio_exit)
2877
2878 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
2879 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2880
2881 module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2882 MODULE_PARM_DESC(max_age_seconds, "No longer does anything");
2883
2884 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
2885 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2886
2887 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
2888 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2889
2890 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
2891 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2892
2893 module_param_named(allocated_kmalloc_bytes, dm_bufio_allocated_kmalloc, ulong, 0444);
2894 MODULE_PARM_DESC(allocated_kmalloc_bytes, "Memory allocated with kmalloc_alloc");
2895
2896 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
2897 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2898
2899 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
2900 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2901
2902 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
2903 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
2904
2905 MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
2906 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
2907 MODULE_LICENSE("GPL");