// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#include <linux/jump_label.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when the number of dirty buffers exceeds
 *	DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_RATIO	3
#define DM_BUFIO_LOW_WATERMARK_RATIO	16

/*
 * Check buffer ages in this interval (seconds).
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds).
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The number of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to buffer_tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;
	spinlock_t spinlock;
	bool no_sleep;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned int block_size;
	s8 sectors_per_block_bits;
	void (*alloc_callback)(struct dm_buffer *buf);
	void (*write_callback)(struct dm_buffer *buf);
	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned int need_reserved_buffers;

	unsigned int minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;

	struct shrinker shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	struct list_head global_list;
	sector_t block;
	void *data;
	unsigned char data_mode;		/* DATA_MODE_* */
	unsigned char list_mode;		/* LIST_* */
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned int accessed;
	unsigned int hold_count;
	unsigned long state;
	unsigned long last_accessed;
	unsigned int dirty_start;
	unsigned int dirty_end;
	unsigned int write_start;
	unsigned int write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	void (*end_io)(struct dm_buffer *buf, blk_status_t stat);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

/*----------------------------------------------------------------*/

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return spin_trylock_bh(&c->spinlock);
	else
		return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_unlock_bh(&c->spinlock);
	else
		mutex_unlock(&c->lock);
}
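
/*
 * Design note: no_sleep_enabled is a static key, so when no client was
 * created with DM_BUFIO_CLIENT_NO_SLEEP, the c->no_sleep test is patched
 * out entirely and every caller goes straight to the mutex path. Only
 * clients that must run in atomic context pay for the spinlock variant.
 */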

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

static LIST_HEAD(global_queue);

static unsigned long global_num;

/*
 * Buffers are freed after this timeout
 */
static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count.
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

/*
 *----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *----------------------------------------------------------------
 */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

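/*
 * __find_next() returns the buffer at @block if it is cached, otherwise the
 * cached buffer with the smallest block number greater than @block, or NULL
 * if no such buffer exists. dm_bufio_forget_buffers() relies on this to walk
 * a block range without probing every block number individually.
 */
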
static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	b->accessed = 1;

	if (!unlink) {
		list_add(&b->global_list, &global_queue);
		global_num++;
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	} else {
		list_del(&b->global_list);
		global_num--;
	}

	spin_unlock(&global_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY) {
		unsigned int noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask);
}

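/*
 * Worked example of the mode selection above (assuming 4 KiB pages and a
 * typical KMALLOC_MAX_SIZE): a 512-byte block is served from c->slab_cache
 * (DATA_MODE_SLAB); a 64 KiB block requested with __GFP_NORETRY uses an
 * order-4 __get_free_pages allocation; and the same 64 KiB block allocated
 * for the client's reserve with GFP_KERNEL falls through to __vmalloc.
 */
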
/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;

	adjust_total_allocated(b, false);
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);

	adjust_total_allocated(b, true);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	b->accessed = 1;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*
 *--------------------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 * If the buffer is small enough (it fits into the bio's inline vector) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use the dm-io layer to do the
 * I/O. The dm-io layer splits the I/O into multiple requests, avoiding the
 * above shortcomings.
 *--------------------------------------------------------------------------
 */

/*
 * dm-io completion routine. It just calls b->end_io, pretending
 * that the request was handled directly with the bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
		     unsigned int n_sectors, unsigned int offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_opf = op,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;

	bio_uninit(bio);
	kfree(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
		    unsigned int n_sectors, unsigned int offset)
{
	struct bio *bio;
	char *ptr;
	unsigned int vec_size, len;

	vec_size = b->c->block_size >> PAGE_SHIFT;
	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
		vec_size += 2;

	bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
	if (!bio) {
dmio:
		use_dmio(b, op, sector, n_sectors, offset);
		return;
	}
	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len);

		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			bio_put(bio);
			goto dmio;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(bio);
}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}

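/*
 * Example: with a 4096-byte block size, sectors_per_block_bits is
 * __ffs(4096) - SECTOR_SHIFT = 12 - 9 = 3, so block 5 maps to sector
 * (5 << 3) + c->start = 40 + c->start. A non-power-of-2 block size such
 * as 3072 bytes takes the multiply path: 5 * (3072 >> 9) = 30 sectors.
 */
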
static void submit_io(struct dm_buffer *b, enum req_op op,
		      void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned int n_sectors;
	sector_t sector;
	unsigned int offset, end;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (op != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, op, sector, n_sectors, offset);
	else
		use_dmio(b, op, sector, n_sectors, offset);
}

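/*
 * Example of the write rounding above: a dirty range covering bytes
 * 5000..6000 of a 16 KiB buffer becomes offset = 5000 & ~4095 = 4096 and
 * end = (6000 + 4095) & ~4095 = 8192, so the buffer's sectors 8..15 are
 * written as one 4 KiB-aligned I/O.
 */
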
/*
 *--------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------
 */

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

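/*
 * The plug keeps the queued writes together so the block layer can merge
 * adjacent requests before dispatch; this is why callers first collect
 * dirty buffers on a write_list instead of submitting them one at a time.
 */
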
/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	/* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
	if (!smp_load_acquire(&b->state))	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
		    unlikely(test_bit_acquire(B_READING, &b->state)))
			continue;

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return NULL;

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other thread frees a buffer or releases the hold count
 * on some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}

/*
 * Check if we're over the watermark.
 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times
 * the number of clean buffers, start writing them back asynchronously.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
		__write_dirty_buffers_async(c, 1, write_list);
}

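/*
 * Example: with DM_BUFIO_WRITEBACK_RATIO of 3, a client holding 100 clean
 * buffers starts background writeback once it accumulates 301 dirty buffers.
 */
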
/*
 *--------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------
 */

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if the dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct dm_buffer *b, blk_status_t status)
{
	b->read_error = status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, REQ_OP_READ, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

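/*
 * Typical calling pattern (an illustrative sketch; "my_client" and the
 * block number are placeholders):
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_read(my_client, 17, &buf);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... read or modify the block_size bytes at data ...
 *	dm_bufio_mark_buffer_dirty(buf);	(only if data was modified)
 *	dm_bufio_release(buf);
 */
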
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned int n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;

		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, REQ_OP_READ, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit_acquire(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned int start, unsigned int end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		__relink_lru(b, LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

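/*
 * Note that only one [dirty_start, dirty_end) range is tracked per buffer,
 * so successive calls widen it to the union: dirtying bytes 0..512 and then
 * 3584..4096 of a 4 KiB buffer causes the whole buffer to be written in one
 * I/O once it is flushed.
 */
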
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush the hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us. In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * Use dm-io to send a discard request to the device.
 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_DISCARD | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = block_to_sector(c, block),
		.count = block_to_sector(c, count),
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the buffer tree for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but do not relink it, because that other user needs to have the
 * buffer at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		b->dirty_start = 0;
		b->dirty_end = c->block_size;
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;

		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, REQ_OP_WRITE, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

static void forget_buffer_locked(struct dm_buffer *b)
{
	if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}
}

/*
 * Free the given buffer.
 *
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b)
		forget_buffer_locked(b);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);

void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
{
	struct dm_buffer *b;
	sector_t end_block = block + n_blocks;

	while (block < end_block) {
		dm_bufio_lock(c);

		b = __find_next(c, block);
		if (b) {
			block = b->block + 1;
			forget_buffer_locked(b);
		}

		dm_bufio_unlock(c);

		if (!b)
			break;
	}
}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);

unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	sector_t s = bdev_nr_sectors(c->bdev);

	if (s >= c->start)
		s -= c->start;
	else
		s = 0;
	if (likely(c->sectors_per_block_bits >= 0))
		s >>= c->sectors_per_block_bits;
	else
		sector_div(s, c->block_size >> SECTOR_SHIFT);
	return s;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
{
	return c->dm_io;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

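/*
 * The aux-data accessor works because each buffer is allocated from
 * c->slab_buffer with object size sizeof(struct dm_buffer) + aux_size,
 * so the client's per-buffer auxiliary area starts immediately after the
 * dm_buffer structure ("b + 1").
 */
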
static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;
	bool warned = false;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list) {
			WARN_ON(!warned);
			warned = true;
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			stack_trace_print(b->stack_entries, b->stack_len, 1);
			/* mark unclaimed to avoid BUG_ON below */
			b->hold_count = 0;
#endif
		}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if IO is pending or the client
 * is still using it. Caller is expected to know the buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to a different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS) ||
	    (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
		if (test_bit_acquire(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);

	if (likely(c->sectors_per_block_bits >= 0))
		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
	else
		retain_bytes /= c->block_size;

	return retain_bytes;
}

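/*
 * Example: with the default dm_bufio_retain_bytes of 256 KiB and a 4 KiB
 * block size, get_retain_buffers() returns 262144 >> (3 + 9) = 64 buffers
 * that eviction will try to keep cached.
 */
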
static void __scan(struct dm_bufio_client *c)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = c->n_buffers[LIST_CLEAN] +
			      c->n_buffers[LIST_DIRTY];
	unsigned long retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (count - freed <= retain_target)
				atomic_long_set(&c->need_shrink, 0);
			if (!atomic_long_read(&c->need_shrink))
				return;
			if (__try_evict_buffer(b, GFP_KERNEL)) {
				atomic_long_dec(&c->need_shrink);
				freed++;
			}
			cond_resched();
		}
	}
}

static void shrink_work(struct work_struct *w)
{
	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);

	dm_bufio_lock(c);
	__scan(c);
	dm_bufio_unlock(c);
}

static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
	queue_work(dm_bufio_wq, &c->shrink_work);

	return sc->nr_to_scan;
}

static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);

	if (unlikely(count < retain_target))
		count = 0;
	else
		count -= retain_target;

	if (unlikely(count < queued_for_cleanup))
		count = 0;
	else
		count -= queued_for_cleanup;

	return count;
}

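/*
 * The shrinker callbacks never free buffers directly: dm_bufio_shrink_scan()
 * only accumulates the requested amount in c->need_shrink and kicks
 * shrink_work, so the actual eviction runs in process context where taking
 * c->lock and doing I/O in __make_buffer_clean() is safe.
 */
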
1751/*
1752 * Create the buffering interface
1753 */
86a3238c
HM
1754struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
1755 unsigned int reserved_buffers, unsigned int aux_size,
95d402f0 1756 void (*alloc_callback)(struct dm_buffer *),
0fcb100d
NH
1757 void (*write_callback)(struct dm_buffer *),
1758 unsigned int flags)
95d402f0
MP
1759{
1760 int r;
1761 struct dm_bufio_client *c;
86a3238c 1762 unsigned int i;
	char slab_name[27];

	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
		DMERR("%s: block size not specified or is not multiple of 512b", __func__);
		r = -EINVAL;
		goto bad_client;
	}

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	if (is_power_of_2(block_size))
		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	else
		c->sectors_per_block_bits = -1;

	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
		c->no_sleep = true;
		static_branch_inc(&no_sleep_enabled);
	}

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	spin_lock_init(&c->spinlock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	if (block_size <= KMALLOC_MAX_SIZE &&
	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
		unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);

		snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size);
		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
						  SLAB_RECLAIM_ACCOUNT, NULL);
		if (!c->slab_cache) {
			r = -ENOMEM;
			goto bad;
		}
	}
	if (aux_size)
		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size);
	else
		snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer");
	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
					   0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!c->slab_buffer) {
		r = -ENOMEM;
		goto bad;
	}

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad;
		}
		__free_buffer_wake(b);
	}

	INIT_WORK(&c->shrink_work, shrink_work);
	atomic_long_set(&c->need_shrink, 0);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)",
			      MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
	if (r)
		goto bad;

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	return c;

bad:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);

		list_del(&b->lru_list);
		free_buffer(b);
	}
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	mutex_destroy(&c->lock);
	if (c->no_sleep)
		static_branch_dec(&no_sleep_enabled);
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
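
/*
 * Example usage (an illustrative sketch, not part of this driver): a
 * typical device-mapper target creates one client for its metadata
 * device, reads blocks through it, and destroys it in its destructor.
 * The calls below assume the signatures declared in <linux/dm-bufio.h>;
 * error handling is abbreviated.
 *
 *	struct dm_bufio_client *c;
 *	struct dm_buffer *buf;
 *	void *data;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	data = dm_bufio_read(c, 0, &buf);
 *	if (!IS_ERR(data)) {
 *		(use the 4096 bytes at "data", then drop the reference)
 *		dm_bufio_release(buf);
 *	}
 *
 *	dm_bufio_client_destroy(c);
 */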

/*
 * Free the buffering interface.
 * The caller must ensure that no buffers are still referenced.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned int i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);
	flush_work(&c->shrink_work);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);

		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %u: %lu", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
	mutex_destroy(&c->lock);
	if (c->no_sleep)
		static_branch_dec(&no_sleep_enabled);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
	c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
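
/*
 * For example, a target whose payload begins after a fixed-size
 * superblock could make buffer indices relative to the payload area
 * (SB_SECTORS is a hypothetical constant, shown only for illustration):
 *
 *	dm_bufio_set_sector_offset(c, SB_SECTORS);
 */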

static unsigned int get_max_age_hz(void)
{
	unsigned int max_age = READ_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}

static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long count;
	LIST_HEAD(write_list);

	dm_bufio_lock(c);

	__check_watermark(c, &write_list);
	if (unlikely(!list_empty(&write_list))) {
		dm_bufio_unlock(c);
		__flush_write_list(&write_list);
		dm_bufio_lock(c);
	}

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		cond_resched();
	}

	dm_bufio_unlock(c);
}
static void do_global_cleanup(struct work_struct *w)
{
	struct dm_bufio_client *locked_client = NULL;
	struct dm_bufio_client *current_client;
	struct dm_buffer *b;
	unsigned int spinlock_hold_count;
	unsigned long threshold = dm_bufio_cache_size -
		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
	unsigned long loops = global_num * 2;

	mutex_lock(&dm_bufio_clients_lock);

	while (1) {
		cond_resched();

		spin_lock(&global_spinlock);
		if (unlikely(dm_bufio_current_allocated <= threshold))
			break;

		spinlock_hold_count = 0;
get_next:
		if (!loops--)
			break;
		if (unlikely(list_empty(&global_queue)))
			break;
		b = list_entry(global_queue.prev, struct dm_buffer, global_list);

		if (b->accessed) {
			b->accessed = 0;
			list_move(&b->global_list, &global_queue);
			if (likely(++spinlock_hold_count < 16))
				goto get_next;
			spin_unlock(&global_spinlock);
			continue;
		}

		current_client = b->c;
		if (unlikely(current_client != locked_client)) {
			if (locked_client)
				dm_bufio_unlock(locked_client);

			if (!dm_bufio_trylock(current_client)) {
				spin_unlock(&global_spinlock);
				dm_bufio_lock(current_client);
				locked_client = current_client;
				continue;
			}

			locked_client = current_client;
		}

		spin_unlock(&global_spinlock);

		if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
			spin_lock(&global_spinlock);
			list_move(&b->global_list, &global_queue);
			spin_unlock(&global_spinlock);
		}
	}

	spin_unlock(&global_spinlock);

	if (locked_client)
		dm_bufio_unlock(locked_client);

	mutex_unlock(&dm_bufio_clients_lock);
}

static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	__cache_size_refresh();

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}
static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*
 *--------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------
 */

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;
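
	/*
	 * Illustrative arithmetic: with 4 KiB pages and 8 GiB of low
	 * memory, totalram_pages() - totalhigh_pages() is 2097152 pages,
	 * so the 2% cap above works out to 41943 pages, i.e. a default
	 * cache limit of roughly 164 MiB.  On 32-bit kernels the vmalloc
	 * clamp is usually the tighter of the two bounds.
	 */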

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;

	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
	destroy_workqueue(dm_bufio_wq);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");
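
/*
 * Tuning example (illustrative): module_param_named() exposes the
 * writable knobs above under /sys/module/dm_bufio/parameters/, so an
 * administrator could cap the cache at 64 MiB and age buffers out
 * after 60 seconds with:
 *
 *	echo 67108864 > /sys/module/dm_bufio/parameters/max_cache_size_bytes
 *	echo 60 > /sys/module/dm_bufio/parameters/max_age_seconds
 */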