dm bufio: change __GFP_IO to __GFP_FS in shrinker callbacks
linux-2.6-block.git: drivers/md/dm-bufio.c
1/*
2 * Copyright (C) 2009-2011 Red Hat, Inc.
3 *
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
5 *
6 * This file is released under the GPL.
7 */
8
9#include "dm-bufio.h"
10
11#include <linux/device-mapper.h>
12#include <linux/dm-io.h>
13#include <linux/slab.h>
14#include <linux/vmalloc.h>
 15#include <linux/shrinker.h>
 16#include <linux/module.h>
17
18#define DM_MSG_PREFIX "bufio"
19
20/*
21 * Memory management policy:
22 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
23 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
24 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
25 * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
26 * dirty buffers.
27 */
28#define DM_BUFIO_MIN_BUFFERS 8
29
30#define DM_BUFIO_MEMORY_PERCENT 2
31#define DM_BUFIO_VMALLOC_PERCENT 25
32#define DM_BUFIO_WRITEBACK_PERCENT 75
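/*
 * Illustrative arithmetic (assumed numbers): on a machine with 4 GiB of
 * low memory the DM_BUFIO_MEMORY_PERCENT limit is 2% of that, about
 * 82 MiB; on a 32-bit machine with a 128 MiB vmalloc arena the 25%
 * vmalloc cap is 32 MiB and, being lower, is the one that applies
 * (see the computation in dm_bufio_init below).
 */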
33
34/*
35 * Check buffer ages in this interval (seconds)
36 */
37#define DM_BUFIO_WORK_TIMER_SECS 10
38
39/*
40 * Free buffers when they are older than this (seconds)
41 */
42#define DM_BUFIO_DEFAULT_AGE_SECS 60
43
44/*
45 * The number of bvec entries that are embedded directly in the buffer.
46 * If the chunk size is larger, dm-io is used to do the io.
47 */
48#define DM_BUFIO_INLINE_VECS 16
49
50/*
51 * Buffer hash
52 */
53#define DM_BUFIO_HASH_BITS 20
54#define DM_BUFIO_HASH(block) \
55 ((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
56 ((1 << DM_BUFIO_HASH_BITS) - 1))
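/*
 * Illustrative example (assumed block number): with DM_BUFIO_HASH_BITS
 * of 20, block 0x12345678 gives (0x123 ^ 0x12345678) & 0xfffff = 0x4575b,
 * i.e. the bits above the low 20 are folded back in by XOR before masking
 * to form the hash table index.
 */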
57
58/*
59 * Don't try to use kmem_cache_alloc for blocks larger than this.
60 * For explanation, see alloc_buffer_data below.
61 */
62#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
63#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))
64
65/*
66 * dm_buffer->list_mode
67 */
68#define LIST_CLEAN 0
69#define LIST_DIRTY 1
70#define LIST_SIZE 2
71
72/*
73 * Linking of buffers:
74 * All buffers are linked to cache_hash with their hash_list field.
75 *
76 * Clean buffers that are not being written (B_WRITING not set)
77 * are linked to lru[LIST_CLEAN] with their lru_list field.
78 *
79 * Dirty and clean buffers that are being written are linked to
80 * lru[LIST_DIRTY] with their lru_list field. When the write
81 * finishes, the buffer cannot be relinked immediately (because we
82 * are in an interrupt context and relinking requires process
83 * context), so some clean-not-writing buffers can be held on
84 * dirty_lru too. They are later added to lru in the process
85 * context.
86 */
87struct dm_bufio_client {
88 struct mutex lock;
89
90 struct list_head lru[LIST_SIZE];
91 unsigned long n_buffers[LIST_SIZE];
92
93 struct block_device *bdev;
94 unsigned block_size;
95 unsigned char sectors_per_block_bits;
96 unsigned char pages_per_block_bits;
97 unsigned char blocks_per_page_bits;
98 unsigned aux_size;
99 void (*alloc_callback)(struct dm_buffer *);
100 void (*write_callback)(struct dm_buffer *);
101
102 struct dm_io_client *dm_io;
103
104 struct list_head reserved_buffers;
105 unsigned need_reserved_buffers;
106
107 unsigned minimum_buffers;
108
109 struct hlist_head *cache_hash;
110 wait_queue_head_t free_buffer_wait;
111
112 int async_write_error;
113
114 struct list_head client_list;
115 struct shrinker shrinker;
116};
117
118/*
119 * Buffer state bits.
120 */
121#define B_READING 0
122#define B_WRITING 1
123#define B_DIRTY 2
124
125/*
126 * Describes how the block was allocated:
127 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
128 * See the comment at alloc_buffer_data.
129 */
130enum data_mode {
131 DATA_MODE_SLAB = 0,
132 DATA_MODE_GET_FREE_PAGES = 1,
133 DATA_MODE_VMALLOC = 2,
134 DATA_MODE_LIMIT = 3
135};
136
137struct dm_buffer {
138 struct hlist_node hash_list;
139 struct list_head lru_list;
140 sector_t block;
141 void *data;
142 enum data_mode data_mode;
143 unsigned char list_mode; /* LIST_* */
144 unsigned hold_count;
145 int read_error;
146 int write_error;
147 unsigned long state;
148 unsigned long last_accessed;
149 struct dm_bufio_client *c;
 150 struct list_head write_list;
151 struct bio bio;
152 struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
153};
154
155/*----------------------------------------------------------------*/
156
157static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
158static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
159
160static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
161{
162 unsigned ret = c->blocks_per_page_bits - 1;
163
164 BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
165
166 return ret;
167}
168
169#define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
170#define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)])
171
172#define dm_bufio_in_request() (!!current->bio_list)
173
174static void dm_bufio_lock(struct dm_bufio_client *c)
175{
176 mutex_lock_nested(&c->lock, dm_bufio_in_request());
177}
178
179static int dm_bufio_trylock(struct dm_bufio_client *c)
180{
181 return mutex_trylock(&c->lock);
182}
183
184static void dm_bufio_unlock(struct dm_bufio_client *c)
185{
186 mutex_unlock(&c->lock);
187}
188
189/*
190 * FIXME Move to sched.h?
191 */
192#ifdef CONFIG_PREEMPT_VOLUNTARY
193# define dm_bufio_cond_resched() \
194do { \
195 if (unlikely(need_resched())) \
196 _cond_resched(); \
197} while (0)
198#else
199# define dm_bufio_cond_resched() do { } while (0)
200#endif
201
202/*----------------------------------------------------------------*/
203
204/*
205 * Default cache size: available memory divided by the ratio.
206 */
207static unsigned long dm_bufio_default_cache_size;
208
209/*
210 * Total cache size set by the user.
211 */
212static unsigned long dm_bufio_cache_size;
213
214/*
215 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
216 * at any time. If it disagrees, the user has changed cache size.
217 */
218static unsigned long dm_bufio_cache_size_latch;
219
220static DEFINE_SPINLOCK(param_spinlock);
221
222/*
223 * Buffers are freed after this timeout
224 */
225static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
226
227static unsigned long dm_bufio_peak_allocated;
228static unsigned long dm_bufio_allocated_kmem_cache;
229static unsigned long dm_bufio_allocated_get_free_pages;
230static unsigned long dm_bufio_allocated_vmalloc;
231static unsigned long dm_bufio_current_allocated;
232
233/*----------------------------------------------------------------*/
234
235/*
236 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
237 */
238static unsigned long dm_bufio_cache_size_per_client;
239
240/*
241 * The current number of clients.
242 */
243static int dm_bufio_client_count;
244
245/*
246 * The list of all clients.
247 */
248static LIST_HEAD(dm_bufio_all_clients);
249
250/*
251 * This mutex protects dm_bufio_cache_size_latch,
252 * dm_bufio_cache_size_per_client and dm_bufio_client_count
253 */
254static DEFINE_MUTEX(dm_bufio_clients_lock);
255
256/*----------------------------------------------------------------*/
257
258static void adjust_total_allocated(enum data_mode data_mode, long diff)
259{
260 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
261 &dm_bufio_allocated_kmem_cache,
262 &dm_bufio_allocated_get_free_pages,
263 &dm_bufio_allocated_vmalloc,
264 };
265
266 spin_lock(&param_spinlock);
267
268 *class_ptr[data_mode] += diff;
269
270 dm_bufio_current_allocated += diff;
271
272 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
273 dm_bufio_peak_allocated = dm_bufio_current_allocated;
274
275 spin_unlock(&param_spinlock);
276}
277
278/*
279 * Change the number of clients and recalculate per-client limit.
280 */
281static void __cache_size_refresh(void)
282{
283 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
284 BUG_ON(dm_bufio_client_count < 0);
285
 286 dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
287
288 /*
289 * Use default if set to 0 and report the actual cache size used.
290 */
291 if (!dm_bufio_cache_size_latch) {
292 (void)cmpxchg(&dm_bufio_cache_size, 0,
293 dm_bufio_default_cache_size);
294 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
295 }
296
297 dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
298 (dm_bufio_client_count ? : 1);
299}
300
301/*
302 * Allocating buffer data.
303 *
304 * Small buffers are allocated with kmem_cache, to use space optimally.
305 *
306 * For large buffers, we choose between get_free_pages and vmalloc.
307 * Each has advantages and disadvantages.
308 *
309 * __get_free_pages can randomly fail if the memory is fragmented.
310 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
311 * as low as 128M) so using it for caching is not appropriate.
312 *
313 * If the allocation may fail we use __get_free_pages. Memory fragmentation
314 * won't have a fatal effect here, but it just causes flushes of some other
315 * buffers and more I/O will be performed. Don't use __get_free_pages if it
316 * always fails (i.e. order >= MAX_ORDER).
317 *
318 * If the allocation shouldn't fail we use __vmalloc. This is only for the
319 * initial reserve allocation, so there's no risk of wasting all vmalloc
320 * space.
321 */
322static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
323 enum data_mode *data_mode)
324{
325 unsigned noio_flag;
326 void *ptr;
327
328 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
329 *data_mode = DATA_MODE_SLAB;
330 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
331 }
332
333 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
334 gfp_mask & __GFP_NORETRY) {
335 *data_mode = DATA_MODE_GET_FREE_PAGES;
336 return (void *)__get_free_pages(gfp_mask,
337 c->pages_per_block_bits);
338 }
339
340 *data_mode = DATA_MODE_VMALLOC;
341
342 /*
343 * __vmalloc allocates the data pages and auxiliary structures with
344 * gfp_flags that were specified, but pagetables are always allocated
345 * with GFP_KERNEL, no matter what was specified as gfp_mask.
346 *
347 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
348 * all allocations done by this process (including pagetables) are done
349 * as if GFP_NOIO was specified.
350 */
351
352 if (gfp_mask & __GFP_NORETRY)
353 noio_flag = memalloc_noio_save();
354
 355 ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
356
357 if (gfp_mask & __GFP_NORETRY)
358 memalloc_noio_restore(noio_flag);
359
360 return ptr;
361}
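/*
 * Illustrative example of the selection above (assumes 4 KiB pages and
 * MAX_ORDER == 11): a 512-byte block fits in PAGE_SIZE/2 and comes from
 * the per-block-size slab cache; a 64 KiB block requested with
 * __GFP_NORETRY is tried with __get_free_pages(); a block above the
 * PAGE_SIZE << (MAX_ORDER - 1) limit (4 MiB here), or any allocation that
 * must not fail (no __GFP_NORETRY, i.e. the reserve buffers), falls back
 * to __vmalloc().
 */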
362
363/*
364 * Free buffer's data.
365 */
366static void free_buffer_data(struct dm_bufio_client *c,
367 void *data, enum data_mode data_mode)
368{
369 switch (data_mode) {
370 case DATA_MODE_SLAB:
371 kmem_cache_free(DM_BUFIO_CACHE(c), data);
372 break;
373
374 case DATA_MODE_GET_FREE_PAGES:
375 free_pages((unsigned long)data, c->pages_per_block_bits);
376 break;
377
378 case DATA_MODE_VMALLOC:
379 vfree(data);
380 break;
381
382 default:
383 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
384 data_mode);
385 BUG();
386 }
387}
388
389/*
390 * Allocate buffer and its data.
391 */
392static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
393{
394 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
395 gfp_mask);
396
397 if (!b)
398 return NULL;
399
400 b->c = c;
401
402 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
403 if (!b->data) {
404 kfree(b);
405 return NULL;
406 }
407
408 adjust_total_allocated(b->data_mode, (long)c->block_size);
409
410 return b;
411}
412
413/*
414 * Free buffer and its data.
415 */
416static void free_buffer(struct dm_buffer *b)
417{
418 struct dm_bufio_client *c = b->c;
419
420 adjust_total_allocated(b->data_mode, -(long)c->block_size);
421
422 free_buffer_data(c, b->data, b->data_mode);
423 kfree(b);
424}
425
426/*
427 * Link buffer to the hash list and clean or dirty queue.
428 */
429static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
430{
431 struct dm_bufio_client *c = b->c;
432
433 c->n_buffers[dirty]++;
434 b->block = block;
435 b->list_mode = dirty;
436 list_add(&b->lru_list, &c->lru[dirty]);
437 hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
438 b->last_accessed = jiffies;
439}
440
441/*
442 * Unlink buffer from the hash list and dirty or clean queue.
443 */
444static void __unlink_buffer(struct dm_buffer *b)
445{
446 struct dm_bufio_client *c = b->c;
447
448 BUG_ON(!c->n_buffers[b->list_mode]);
449
450 c->n_buffers[b->list_mode]--;
451 hlist_del(&b->hash_list);
452 list_del(&b->lru_list);
453}
454
455/*
456 * Place the buffer to the head of dirty or clean LRU queue.
457 */
458static void __relink_lru(struct dm_buffer *b, int dirty)
459{
460 struct dm_bufio_client *c = b->c;
461
462 BUG_ON(!c->n_buffers[b->list_mode]);
463
464 c->n_buffers[b->list_mode]--;
465 c->n_buffers[dirty]++;
466 b->list_mode = dirty;
 467 list_move(&b->lru_list, &c->lru[dirty]);
 468 b->last_accessed = jiffies;
469}
470
471/*----------------------------------------------------------------
472 * Submit I/O on the buffer.
473 *
474 * Bio interface is faster but it has some problems:
475 * the vector list is limited (increasing this limit increases
476 * memory-consumption per buffer, so it is not viable);
477 *
478 * the memory must be direct-mapped, not vmalloced;
479 *
480 * the I/O driver can reject requests spuriously if it thinks that
481 * the requests are too big for the device or if they cross a
482 * controller-defined memory boundary.
483 *
484 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
485 * it is not vmalloced, try using the bio interface.
486 *
487 * If the buffer is big, if it is vmalloced or if the underlying device
488 * rejects the bio because it is too large, use dm-io layer to do the I/O.
489 * The dm-io layer splits the I/O into multiple requests, avoiding the above
490 * shortcomings.
491 *--------------------------------------------------------------*/
492
493/*
494 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
495 * that the request was handled directly with bio interface.
496 */
497static void dmio_complete(unsigned long error, void *context)
498{
499 struct dm_buffer *b = context;
500
501 b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
502}
503
504static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
505 bio_end_io_t *end_io)
506{
507 int r;
508 struct dm_io_request io_req = {
509 .bi_rw = rw,
510 .notify.fn = dmio_complete,
511 .notify.context = b,
512 .client = b->c->dm_io,
513 };
514 struct dm_io_region region = {
515 .bdev = b->c->bdev,
516 .sector = block << b->c->sectors_per_block_bits,
517 .count = b->c->block_size >> SECTOR_SHIFT,
518 };
519
520 if (b->data_mode != DATA_MODE_VMALLOC) {
521 io_req.mem.type = DM_IO_KMEM;
522 io_req.mem.ptr.addr = b->data;
523 } else {
524 io_req.mem.type = DM_IO_VMA;
525 io_req.mem.ptr.vma = b->data;
526 }
527
528 b->bio.bi_end_io = end_io;
529
530 r = dm_io(&io_req, 1, &region, NULL);
531 if (r)
532 end_io(&b->bio, r);
533}
534
535static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
536 bio_end_io_t *end_io)
537{
538 char *ptr;
539 int len;
540
541 bio_init(&b->bio);
542 b->bio.bi_io_vec = b->bio_vec;
543 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
 544 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
545 b->bio.bi_bdev = b->c->bdev;
546 b->bio.bi_end_io = end_io;
547
548 /*
549 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
550 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
551 */
552 ptr = b->data;
553 len = b->c->block_size;
554
555 if (len >= PAGE_SIZE)
556 BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
557 else
558 BUG_ON((unsigned long)ptr & (len - 1));
559
560 do {
561 if (!bio_add_page(&b->bio, virt_to_page(ptr),
562 len < PAGE_SIZE ? len : PAGE_SIZE,
563 virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
564 BUG_ON(b->c->block_size <= PAGE_SIZE);
565 use_dmio(b, rw, block, end_io);
566 return;
567 }
568
569 len -= PAGE_SIZE;
570 ptr += PAGE_SIZE;
571 } while (len > 0);
572
573 submit_bio(rw, &b->bio);
574}
575
576static void submit_io(struct dm_buffer *b, int rw, sector_t block,
577 bio_end_io_t *end_io)
578{
579 if (rw == WRITE && b->c->write_callback)
580 b->c->write_callback(b);
581
582 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
583 b->data_mode != DATA_MODE_VMALLOC)
584 use_inline_bio(b, rw, block, end_io);
585 else
586 use_dmio(b, rw, block, end_io);
587}
588
589/*----------------------------------------------------------------
590 * Writing dirty buffers
591 *--------------------------------------------------------------*/
592
593/*
594 * The endio routine for write.
595 *
596 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
597 * it.
598 */
599static void write_endio(struct bio *bio, int error)
600{
601 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
602
603 b->write_error = error;
 604 if (unlikely(error)) {
605 struct dm_bufio_client *c = b->c;
606 (void)cmpxchg(&c->async_write_error, 0, error);
607 }
608
609 BUG_ON(!test_bit(B_WRITING, &b->state));
610
 611 smp_mb__before_atomic();
 612 clear_bit(B_WRITING, &b->state);
 613 smp_mb__after_atomic();
614
615 wake_up_bit(&b->state, B_WRITING);
616}
617
618/*
619 * Initiate a write on a dirty buffer, but don't wait for it.
620 *
621 * - If the buffer is not dirty, exit.
 622 * - If there is a previous write going on, wait for it to finish (we can't
623 * have two writes on the same buffer simultaneously).
624 * - Submit our write and don't wait on it. We set B_WRITING indicating
625 * that there is a write in progress.
626 */
627static void __write_dirty_buffer(struct dm_buffer *b,
628 struct list_head *write_list)
629{
630 if (!test_bit(B_DIRTY, &b->state))
631 return;
632
633 clear_bit(B_DIRTY, &b->state);
 634 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 635
636 if (!write_list)
637 submit_io(b, WRITE, b->block, write_endio);
638 else
639 list_add_tail(&b->write_list, write_list);
640}
641
642static void __flush_write_list(struct list_head *write_list)
643{
644 struct blk_plug plug;
645 blk_start_plug(&plug);
646 while (!list_empty(write_list)) {
647 struct dm_buffer *b =
648 list_entry(write_list->next, struct dm_buffer, write_list);
649 list_del(&b->write_list);
650 submit_io(b, WRITE, b->block, write_endio);
651 dm_bufio_cond_resched();
652 }
653 blk_finish_plug(&plug);
654}
655
656/*
657 * Wait until any activity on the buffer finishes. Possibly write the
658 * buffer if it is dirty. When this function finishes, there is no I/O
659 * running on the buffer and the buffer is not dirty.
660 */
661static void __make_buffer_clean(struct dm_buffer *b)
662{
663 BUG_ON(b->hold_count);
664
665 if (!b->state) /* fast case */
666 return;
667
 668 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 669 __write_dirty_buffer(b, NULL);
 670 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
671}
672
673/*
674 * Find some buffer that is not held by anybody, clean it, unlink it and
675 * return it.
676 */
677static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
678{
679 struct dm_buffer *b;
680
681 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
682 BUG_ON(test_bit(B_WRITING, &b->state));
683 BUG_ON(test_bit(B_DIRTY, &b->state));
684
685 if (!b->hold_count) {
686 __make_buffer_clean(b);
687 __unlink_buffer(b);
688 return b;
689 }
690 dm_bufio_cond_resched();
691 }
692
693 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
694 BUG_ON(test_bit(B_READING, &b->state));
695
696 if (!b->hold_count) {
697 __make_buffer_clean(b);
698 __unlink_buffer(b);
699 return b;
700 }
701 dm_bufio_cond_resched();
702 }
703
704 return NULL;
705}
706
707/*
 708 * Wait until some other thread frees a buffer or releases its hold count
 709 * on some buffer.
710 *
711 * This function is entered with c->lock held, drops it and regains it
712 * before exiting.
713 */
714static void __wait_for_free_buffer(struct dm_bufio_client *c)
715{
716 DECLARE_WAITQUEUE(wait, current);
717
718 add_wait_queue(&c->free_buffer_wait, &wait);
719 set_task_state(current, TASK_UNINTERRUPTIBLE);
720 dm_bufio_unlock(c);
721
722 io_schedule();
723
724 set_task_state(current, TASK_RUNNING);
725 remove_wait_queue(&c->free_buffer_wait, &wait);
726
727 dm_bufio_lock(c);
728}
729
730enum new_flag {
731 NF_FRESH = 0,
732 NF_READ = 1,
733 NF_GET = 2,
734 NF_PREFETCH = 3
735};
736
737/*
738 * Allocate a new buffer. If the allocation is not possible, wait until
739 * some other thread frees a buffer.
740 *
741 * May drop the lock and regain it.
742 */
 743static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
744{
745 struct dm_buffer *b;
746
747 /*
748 * dm-bufio is resistant to allocation failures (it just keeps
749 * one buffer reserved in cases all the allocations fail).
750 * So set flags to not try too hard:
751 * GFP_NOIO: don't recurse into the I/O layer
752 * __GFP_NORETRY: don't retry and rather return failure
753 * __GFP_NOMEMALLOC: don't use emergency reserves
754 * __GFP_NOWARN: don't print a warning in case of failure
755 *
756 * For debugging, if we set the cache size to 1, no new buffers will
757 * be allocated.
758 */
759 while (1) {
760 if (dm_bufio_cache_size_latch != 1) {
761 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
762 if (b)
763 return b;
764 }
765
766 if (nf == NF_PREFETCH)
767 return NULL;
768
769 if (!list_empty(&c->reserved_buffers)) {
770 b = list_entry(c->reserved_buffers.next,
771 struct dm_buffer, lru_list);
772 list_del(&b->lru_list);
773 c->need_reserved_buffers++;
774
775 return b;
776 }
777
778 b = __get_unclaimed_buffer(c);
779 if (b)
780 return b;
781
782 __wait_for_free_buffer(c);
783 }
784}
785
 786static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
 787{
788 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
789
790 if (!b)
791 return NULL;
792
793 if (c->alloc_callback)
794 c->alloc_callback(b);
795
796 return b;
797}
798
799/*
800 * Free a buffer and wake other threads waiting for free buffers.
801 */
802static void __free_buffer_wake(struct dm_buffer *b)
803{
804 struct dm_bufio_client *c = b->c;
805
806 if (!c->need_reserved_buffers)
807 free_buffer(b);
808 else {
809 list_add(&b->lru_list, &c->reserved_buffers);
810 c->need_reserved_buffers--;
811 }
812
813 wake_up(&c->free_buffer_wait);
814}
815
816static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
817 struct list_head *write_list)
818{
819 struct dm_buffer *b, *tmp;
820
821 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
822 BUG_ON(test_bit(B_READING, &b->state));
823
824 if (!test_bit(B_DIRTY, &b->state) &&
825 !test_bit(B_WRITING, &b->state)) {
826 __relink_lru(b, LIST_CLEAN);
827 continue;
828 }
829
830 if (no_wait && test_bit(B_WRITING, &b->state))
831 return;
832
 833 __write_dirty_buffer(b, write_list);
834 dm_bufio_cond_resched();
835 }
836}
837
838/*
839 * Get writeback threshold and buffer limit for a given client.
840 */
841static void __get_memory_limit(struct dm_bufio_client *c,
842 unsigned long *threshold_buffers,
843 unsigned long *limit_buffers)
844{
845 unsigned long buffers;
846
 847 if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
848 mutex_lock(&dm_bufio_clients_lock);
849 __cache_size_refresh();
850 mutex_unlock(&dm_bufio_clients_lock);
851 }
852
853 buffers = dm_bufio_cache_size_per_client >>
854 (c->sectors_per_block_bits + SECTOR_SHIFT);
855
856 if (buffers < c->minimum_buffers)
857 buffers = c->minimum_buffers;
858
859 *limit_buffers = buffers;
860 *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
861}
862
863/*
864 * Check if we're over watermark.
865 * If we are over threshold_buffers, start freeing buffers.
866 * If we're over "limit_buffers", block until we get under the limit.
867 */
868static void __check_watermark(struct dm_bufio_client *c,
869 struct list_head *write_list)
870{
871 unsigned long threshold_buffers, limit_buffers;
872
873 __get_memory_limit(c, &threshold_buffers, &limit_buffers);
874
875 while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
876 limit_buffers) {
877
878 struct dm_buffer *b = __get_unclaimed_buffer(c);
879
880 if (!b)
881 return;
882
883 __free_buffer_wake(b);
884 dm_bufio_cond_resched();
885 }
886
887 if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
 888 __write_dirty_buffers_async(c, 1, write_list);
889}
890
891/*
892 * Find a buffer in the hash.
893 */
894static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
895{
896 struct dm_buffer *b;
 897
 898 hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
899 hash_list) {
900 dm_bufio_cond_resched();
901 if (b->block == block)
902 return b;
903 }
904
905 return NULL;
906}
907
908/*----------------------------------------------------------------
909 * Getting a buffer
910 *--------------------------------------------------------------*/
911
 912static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
913 enum new_flag nf, int *need_submit,
914 struct list_head *write_list)
915{
916 struct dm_buffer *b, *new_b = NULL;
917
918 *need_submit = 0;
919
920 b = __find(c, block);
921 if (b)
922 goto found_buffer;
923
924 if (nf == NF_GET)
925 return NULL;
926
927 new_b = __alloc_buffer_wait(c, nf);
928 if (!new_b)
929 return NULL;
930
931 /*
932 * We've had a period where the mutex was unlocked, so need to
933 * recheck the hash table.
934 */
935 b = __find(c, block);
936 if (b) {
937 __free_buffer_wake(new_b);
 938 goto found_buffer;
939 }
940
 941 __check_watermark(c, write_list);
942
943 b = new_b;
944 b->hold_count = 1;
945 b->read_error = 0;
946 b->write_error = 0;
947 __link_buffer(b, block, LIST_CLEAN);
948
949 if (nf == NF_FRESH) {
950 b->state = 0;
951 return b;
952 }
953
954 b->state = 1 << B_READING;
955 *need_submit = 1;
956
957 return b;
958
959found_buffer:
960 if (nf == NF_PREFETCH)
961 return NULL;
962 /*
963 * Note: it is essential that we don't wait for the buffer to be
964 * read if dm_bufio_get function is used. Both dm_bufio_get and
965 * dm_bufio_prefetch can be used in the driver request routine.
966 * If the user called both dm_bufio_prefetch and dm_bufio_get on
967 * the same buffer, it would deadlock if we waited.
968 */
969 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
970 return NULL;
971
972 b->hold_count++;
973 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
974 test_bit(B_WRITING, &b->state));
975 return b;
976}
977
978/*
979 * The endio routine for reading: set the error, clear the bit and wake up
980 * anyone waiting on the buffer.
981 */
982static void read_endio(struct bio *bio, int error)
983{
984 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
985
986 b->read_error = error;
987
988 BUG_ON(!test_bit(B_READING, &b->state));
989
 990 smp_mb__before_atomic();
 991 clear_bit(B_READING, &b->state);
 992 smp_mb__after_atomic();
993
994 wake_up_bit(&b->state, B_READING);
995}
996
997/*
998 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
999 * functions is similar except that dm_bufio_new doesn't read the
1000 * buffer from the disk (assuming that the caller overwrites all the data
1001 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1002 */
1003static void *new_read(struct dm_bufio_client *c, sector_t block,
1004 enum new_flag nf, struct dm_buffer **bp)
1005{
1006 int need_submit;
1007 struct dm_buffer *b;
1008
1009 LIST_HEAD(write_list);
1010
 1011 dm_bufio_lock(c);
 1012 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1013 dm_bufio_unlock(c);
1014
1015 __flush_write_list(&write_list);
1016
 1017 if (!b)
1018 return b;
1019
1020 if (need_submit)
1021 submit_io(b, READ, b->block, read_endio);
1022
 1023 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1024
1025 if (b->read_error) {
1026 int error = b->read_error;
1027
1028 dm_bufio_release(b);
1029
1030 return ERR_PTR(error);
1031 }
1032
1033 *bp = b;
1034
1035 return b->data;
1036}
1037
1038void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1039 struct dm_buffer **bp)
1040{
1041 return new_read(c, block, NF_GET, bp);
1042}
1043EXPORT_SYMBOL_GPL(dm_bufio_get);
1044
1045void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1046 struct dm_buffer **bp)
1047{
1048 BUG_ON(dm_bufio_in_request());
1049
1050 return new_read(c, block, NF_READ, bp);
1051}
1052EXPORT_SYMBOL_GPL(dm_bufio_read);
1053
1054void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1055 struct dm_buffer **bp)
1056{
1057 BUG_ON(dm_bufio_in_request());
1058
1059 return new_read(c, block, NF_FRESH, bp);
1060}
1061EXPORT_SYMBOL_GPL(dm_bufio_new);
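/*
 * Minimal usage sketch (illustrative only; assumes an already-open
 * struct block_device *bdev and a 4096-byte block size):
 *
 *	struct dm_bufio_client *c;
 *	struct dm_buffer *bp;
 *	void *data;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	data = dm_bufio_read(c, 17, &bp);
 *	if (!IS_ERR(data)) {
 *		... use the 4096 bytes at "data" ...
 *		dm_bufio_release(bp);
 *	}
 *	dm_bufio_client_destroy(c);
 *
 * dm_bufio_get() is the non-blocking variant (it issues no I/O) and
 * dm_bufio_new() skips the read for blocks that will be fully overwritten.
 */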
1062
1063void dm_bufio_prefetch(struct dm_bufio_client *c,
1064 sector_t block, unsigned n_blocks)
1065{
1066 struct blk_plug plug;
1067
1068 LIST_HEAD(write_list);
1069
1070 BUG_ON(dm_bufio_in_request());
1071
1072 blk_start_plug(&plug);
1073 dm_bufio_lock(c);
1074
1075 for (; n_blocks--; block++) {
1076 int need_submit;
1077 struct dm_buffer *b;
1078 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1079 &write_list);
1080 if (unlikely(!list_empty(&write_list))) {
1081 dm_bufio_unlock(c);
1082 blk_finish_plug(&plug);
1083 __flush_write_list(&write_list);
1084 blk_start_plug(&plug);
1085 dm_bufio_lock(c);
1086 }
1087 if (unlikely(b != NULL)) {
1088 dm_bufio_unlock(c);
1089
1090 if (need_submit)
1091 submit_io(b, READ, b->block, read_endio);
1092 dm_bufio_release(b);
1093
1094 dm_bufio_cond_resched();
1095
1096 if (!n_blocks)
1097 goto flush_plug;
1098 dm_bufio_lock(c);
1099 }
1100 }
1101
1102 dm_bufio_unlock(c);
1103
1104flush_plug:
1105 blk_finish_plug(&plug);
1106}
1107EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
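/*
 * Illustrative note: a caller that knows it will need blocks 100..107 can
 * issue dm_bufio_prefetch(c, 100, 8) and later pick the data up with
 * dm_bufio_read() or dm_bufio_get(); the prefetch only starts the reads,
 * it does not keep a hold count on the buffers.
 */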
1108
1109void dm_bufio_release(struct dm_buffer *b)
1110{
1111 struct dm_bufio_client *c = b->c;
1112
1113 dm_bufio_lock(c);
1114
1115 BUG_ON(!b->hold_count);
1116
1117 b->hold_count--;
1118 if (!b->hold_count) {
1119 wake_up(&c->free_buffer_wait);
1120
1121 /*
1122 * If there were errors on the buffer, and the buffer is not
1123 * to be written, free the buffer. There is no point in caching
1124 * invalid buffer.
1125 */
1126 if ((b->read_error || b->write_error) &&
 1127 !test_bit(B_READING, &b->state) &&
1128 !test_bit(B_WRITING, &b->state) &&
1129 !test_bit(B_DIRTY, &b->state)) {
1130 __unlink_buffer(b);
1131 __free_buffer_wake(b);
1132 }
1133 }
1134
1135 dm_bufio_unlock(c);
1136}
1137EXPORT_SYMBOL_GPL(dm_bufio_release);
1138
1139void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1140{
1141 struct dm_bufio_client *c = b->c;
1142
1143 dm_bufio_lock(c);
1144
1145 BUG_ON(test_bit(B_READING, &b->state));
1146
1147 if (!test_and_set_bit(B_DIRTY, &b->state))
1148 __relink_lru(b, LIST_DIRTY);
1149
1150 dm_bufio_unlock(c);
1151}
1152EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1153
1154void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1155{
1156 LIST_HEAD(write_list);
1157
1158 BUG_ON(dm_bufio_in_request());
1159
1160 dm_bufio_lock(c);
 1161 __write_dirty_buffers_async(c, 0, &write_list);
 1162 dm_bufio_unlock(c);
 1163 __flush_write_list(&write_list);
1164}
1165EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1166
1167/*
1168 * For performance, it is essential that the buffers are written asynchronously
1169 * and simultaneously (so that the block layer can merge the writes) and then
1170 * waited upon.
1171 *
1172 * Finally, we flush hardware disk cache.
1173 */
1174int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1175{
1176 int a, f;
1177 unsigned long buffers_processed = 0;
1178 struct dm_buffer *b, *tmp;
1179
1180 LIST_HEAD(write_list);
1181
1182 dm_bufio_lock(c);
1183 __write_dirty_buffers_async(c, 0, &write_list);
1184 dm_bufio_unlock(c);
1185 __flush_write_list(&write_list);
 1186 dm_bufio_lock(c);
1187
1188again:
1189 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1190 int dropped_lock = 0;
1191
1192 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1193 buffers_processed++;
1194
1195 BUG_ON(test_bit(B_READING, &b->state));
1196
1197 if (test_bit(B_WRITING, &b->state)) {
1198 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1199 dropped_lock = 1;
1200 b->hold_count++;
1201 dm_bufio_unlock(c);
1202 wait_on_bit_io(&b->state, B_WRITING,
1203 TASK_UNINTERRUPTIBLE);
1204 dm_bufio_lock(c);
1205 b->hold_count--;
1206 } else
1207 wait_on_bit_io(&b->state, B_WRITING,
1208 TASK_UNINTERRUPTIBLE);
1209 }
1210
1211 if (!test_bit(B_DIRTY, &b->state) &&
1212 !test_bit(B_WRITING, &b->state))
1213 __relink_lru(b, LIST_CLEAN);
1214
1215 dm_bufio_cond_resched();
1216
1217 /*
1218 * If we dropped the lock, the list is no longer consistent,
1219 * so we must restart the search.
1220 *
1221 * In the most common case, the buffer just processed is
1222 * relinked to the clean list, so we won't loop scanning the
1223 * same buffer again and again.
1224 *
1225 * This may livelock if there is another thread simultaneously
1226 * dirtying buffers, so we count the number of buffers walked
1227 * and if it exceeds the total number of buffers, it means that
1228 * someone is doing some writes simultaneously with us. In
1229 * this case, stop, dropping the lock.
1230 */
1231 if (dropped_lock)
1232 goto again;
1233 }
1234 wake_up(&c->free_buffer_wait);
1235 dm_bufio_unlock(c);
1236
1237 a = xchg(&c->async_write_error, 0);
1238 f = dm_bufio_issue_flush(c);
1239 if (a)
1240 return a;
1241
1242 return f;
1243}
1244EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
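/*
 * Illustrative write-side sketch (assumes a client "c" as in the read
 * example above):
 *
 *	data = dm_bufio_new(c, 17, &bp);
 *	if (!IS_ERR(data)) {
 *		memset(data, 0, dm_bufio_get_block_size(c));
 *		dm_bufio_mark_buffer_dirty(bp);
 *		dm_bufio_release(bp);
 *	}
 *	dm_bufio_write_dirty_buffers(c);
 *
 * dm_bufio_write_dirty_buffers() submits all dirty buffers, waits for the
 * writes to finish and then issues a flush, so it is the point at which
 * earlier modifications become durable.
 */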
1245
1246/*
 1247 * Use dm-io to send an empty barrier to flush the device.
1248 */
1249int dm_bufio_issue_flush(struct dm_bufio_client *c)
1250{
1251 struct dm_io_request io_req = {
 1252 .bi_rw = WRITE_FLUSH,
1253 .mem.type = DM_IO_KMEM,
1254 .mem.ptr.addr = NULL,
1255 .client = c->dm_io,
1256 };
1257 struct dm_io_region io_reg = {
1258 .bdev = c->bdev,
1259 .sector = 0,
1260 .count = 0,
1261 };
1262
1263 BUG_ON(dm_bufio_in_request());
1264
1265 return dm_io(&io_req, 1, &io_reg, NULL);
1266}
1267EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1268
1269/*
1270 * We first delete any other buffer that may be at that new location.
1271 *
1272 * Then, we write the buffer to the original location if it was dirty.
1273 *
1274 * Then, if we are the only one who is holding the buffer, relink the buffer
1275 * in the hash queue for the new location.
1276 *
1277 * If there was someone else holding the buffer, we write it to the new
1278 * location but not relink it, because that other user needs to have the buffer
1279 * at the same place.
1280 */
1281void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1282{
1283 struct dm_bufio_client *c = b->c;
1284 struct dm_buffer *new;
1285
1286 BUG_ON(dm_bufio_in_request());
1287
1288 dm_bufio_lock(c);
1289
1290retry:
1291 new = __find(c, new_block);
1292 if (new) {
1293 if (new->hold_count) {
1294 __wait_for_free_buffer(c);
1295 goto retry;
1296 }
1297
1298 /*
1299 * FIXME: Is there any point waiting for a write that's going
1300 * to be overwritten in a bit?
1301 */
1302 __make_buffer_clean(new);
1303 __unlink_buffer(new);
1304 __free_buffer_wake(new);
1305 }
1306
1307 BUG_ON(!b->hold_count);
1308 BUG_ON(test_bit(B_READING, &b->state));
1309
 1310 __write_dirty_buffer(b, NULL);
 1311 if (b->hold_count == 1) {
1312 wait_on_bit_io(&b->state, B_WRITING,
1313 TASK_UNINTERRUPTIBLE);
1314 set_bit(B_DIRTY, &b->state);
1315 __unlink_buffer(b);
1316 __link_buffer(b, new_block, LIST_DIRTY);
1317 } else {
1318 sector_t old_block;
1319 wait_on_bit_lock_io(&b->state, B_WRITING,
1320 TASK_UNINTERRUPTIBLE);
1321 /*
1322 * Relink buffer to "new_block" so that write_callback
1323 * sees "new_block" as a block number.
1324 * After the write, link the buffer back to old_block.
1325 * All this must be done in bufio lock, so that block number
1326 * change isn't visible to other threads.
1327 */
1328 old_block = b->block;
1329 __unlink_buffer(b);
1330 __link_buffer(b, new_block, b->list_mode);
1331 submit_io(b, WRITE, new_block, write_endio);
1332 wait_on_bit_io(&b->state, B_WRITING,
1333 TASK_UNINTERRUPTIBLE);
1334 __unlink_buffer(b);
1335 __link_buffer(b, old_block, b->list_mode);
1336 }
1337
1338 dm_bufio_unlock(c);
1339 dm_bufio_release(b);
1340}
1341EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1342
1343/*
1344 * Free the given buffer.
1345 *
1346 * This is just a hint, if the buffer is in use or dirty, this function
1347 * does nothing.
1348 */
1349void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1350{
1351 struct dm_buffer *b;
1352
1353 dm_bufio_lock(c);
1354
1355 b = __find(c, block);
1356 if (b && likely(!b->hold_count) && likely(!b->state)) {
1357 __unlink_buffer(b);
1358 __free_buffer_wake(b);
1359 }
1360
1361 dm_bufio_unlock(c);
1362}
1363EXPORT_SYMBOL(dm_bufio_forget);
1364
1365void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1366{
1367 c->minimum_buffers = n;
1368}
1369EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
1370
1371unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1372{
1373 return c->block_size;
1374}
1375EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1376
1377sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1378{
1379 return i_size_read(c->bdev->bd_inode) >>
1380 (SECTOR_SHIFT + c->sectors_per_block_bits);
1381}
1382EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1383
1384sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1385{
1386 return b->block;
1387}
1388EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1389
1390void *dm_bufio_get_block_data(struct dm_buffer *b)
1391{
1392 return b->data;
1393}
1394EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1395
1396void *dm_bufio_get_aux_data(struct dm_buffer *b)
1397{
1398 return b + 1;
1399}
1400EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1401
1402struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1403{
1404 return b->c;
1405}
1406EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1407
1408static void drop_buffers(struct dm_bufio_client *c)
1409{
1410 struct dm_buffer *b;
1411 int i;
1412
1413 BUG_ON(dm_bufio_in_request());
1414
1415 /*
1416 * An optimization so that the buffers are not written one-by-one.
1417 */
1418 dm_bufio_write_dirty_buffers_async(c);
1419
1420 dm_bufio_lock(c);
1421
1422 while ((b = __get_unclaimed_buffer(c)))
1423 __free_buffer_wake(b);
1424
1425 for (i = 0; i < LIST_SIZE; i++)
1426 list_for_each_entry(b, &c->lru[i], lru_list)
1427 DMERR("leaked buffer %llx, hold count %u, list %d",
1428 (unsigned long long)b->block, b->hold_count, i);
1429
1430 for (i = 0; i < LIST_SIZE; i++)
1431 BUG_ON(!list_empty(&c->lru[i]));
1432
1433 dm_bufio_unlock(c);
1434}
1435
1436/*
1437 * Test if the buffer is unused and too old, and commit it.
1438 * And if GFP_NOFS is used, we must not do any I/O because we hold
1439 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1440 * rerouted to different bufio client.
1441 */
1442static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
1443 unsigned long max_jiffies)
1444{
1445 if (jiffies - b->last_accessed < max_jiffies)
 1446 return 0;
 1447
 1448 if (!(gfp & __GFP_FS)) {
1449 if (test_bit(B_READING, &b->state) ||
1450 test_bit(B_WRITING, &b->state) ||
1451 test_bit(B_DIRTY, &b->state))
 1452 return 0;
1453 }
1454
1455 if (b->hold_count)
 1456 return 0;
1457
1458 __make_buffer_clean(b);
1459 __unlink_buffer(b);
1460 __free_buffer_wake(b);
1461
 1462 return 1;
1463}
1464
1465static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1466 gfp_t gfp_mask)
1467{
1468 int l;
1469 struct dm_buffer *b, *tmp;
 1470 long freed = 0;
1471
1472 for (l = 0; l < LIST_SIZE; l++) {
1473 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1474 freed += __cleanup_old_buffer(b, gfp_mask, 0);
1475 if (!--nr_to_scan)
1476 return freed;
1477 dm_bufio_cond_resched();
 1478 }
 1479 }
 1480 return freed;
1481}
1482
1483static unsigned long
1484dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 1485{
1486 struct dm_bufio_client *c;
1487 unsigned long freed;
 1488
 1489 c = container_of(shrink, struct dm_bufio_client, shrinker);
 1490 if (sc->gfp_mask & __GFP_FS)
1491 dm_bufio_lock(c);
1492 else if (!dm_bufio_trylock(c))
 1493 return SHRINK_STOP;
 1494
1495 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1496 dm_bufio_unlock(c);
1497 return freed;
1498}
 1499
1500static unsigned long
1501dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1502{
1503 struct dm_bufio_client *c;
1504 unsigned long count;
 1505
 1506 c = container_of(shrink, struct dm_bufio_client, shrinker);
 1507 if (sc->gfp_mask & __GFP_FS)
1508 dm_bufio_lock(c);
1509 else if (!dm_bufio_trylock(c))
1510 return 0;
 1511
1512 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1513 dm_bufio_unlock(c);
1514 return count;
1515}
1516
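/*
 * Summary of the shrinker callbacks above (descriptive note):
 * dm_bufio_shrink_count() reports the number of clean plus dirty buffers
 * and dm_bufio_shrink_scan() frees up to sc->nr_to_scan of them via
 * __scan().  Both take the client lock outright only for __GFP_FS
 * allocations; otherwise they only trylock and bail out (returning 0 or
 * SHRINK_STOP) rather than block in reclaim.
 */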
1517/*
1518 * Create the buffering interface
1519 */
1520struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1521 unsigned reserved_buffers, unsigned aux_size,
1522 void (*alloc_callback)(struct dm_buffer *),
1523 void (*write_callback)(struct dm_buffer *))
1524{
1525 int r;
1526 struct dm_bufio_client *c;
1527 unsigned i;
1528
1529 BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1530 (block_size & (block_size - 1)));
1531
 1532 c = kzalloc(sizeof(*c), GFP_KERNEL);
1533 if (!c) {
1534 r = -ENOMEM;
1535 goto bad_client;
1536 }
1537 c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
1538 if (!c->cache_hash) {
1539 r = -ENOMEM;
1540 goto bad_hash;
1541 }
1542
1543 c->bdev = bdev;
1544 c->block_size = block_size;
1545 c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
1546 c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
1547 ffs(block_size) - 1 - PAGE_SHIFT : 0;
1548 c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
1549 PAGE_SHIFT - (ffs(block_size) - 1) : 0);
1550
1551 c->aux_size = aux_size;
1552 c->alloc_callback = alloc_callback;
1553 c->write_callback = write_callback;
1554
1555 for (i = 0; i < LIST_SIZE; i++) {
1556 INIT_LIST_HEAD(&c->lru[i]);
1557 c->n_buffers[i] = 0;
1558 }
1559
1560 for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1561 INIT_HLIST_HEAD(&c->cache_hash[i]);
1562
1563 mutex_init(&c->lock);
1564 INIT_LIST_HEAD(&c->reserved_buffers);
1565 c->need_reserved_buffers = reserved_buffers;
1566
1567 c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1568
1569 init_waitqueue_head(&c->free_buffer_wait);
1570 c->async_write_error = 0;
1571
1572 c->dm_io = dm_io_client_create();
1573 if (IS_ERR(c->dm_io)) {
1574 r = PTR_ERR(c->dm_io);
1575 goto bad_dm_io;
1576 }
1577
1578 mutex_lock(&dm_bufio_clients_lock);
1579 if (c->blocks_per_page_bits) {
1580 if (!DM_BUFIO_CACHE_NAME(c)) {
1581 DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1582 if (!DM_BUFIO_CACHE_NAME(c)) {
1583 r = -ENOMEM;
1584 mutex_unlock(&dm_bufio_clients_lock);
1585 goto bad_cache;
1586 }
1587 }
1588
1589 if (!DM_BUFIO_CACHE(c)) {
1590 DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1591 c->block_size,
1592 c->block_size, 0, NULL);
1593 if (!DM_BUFIO_CACHE(c)) {
1594 r = -ENOMEM;
1595 mutex_unlock(&dm_bufio_clients_lock);
1596 goto bad_cache;
1597 }
1598 }
1599 }
1600 mutex_unlock(&dm_bufio_clients_lock);
1601
1602 while (c->need_reserved_buffers) {
1603 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1604
1605 if (!b) {
1606 r = -ENOMEM;
1607 goto bad_buffer;
1608 }
1609 __free_buffer_wake(b);
1610 }
1611
1612 mutex_lock(&dm_bufio_clients_lock);
1613 dm_bufio_client_count++;
1614 list_add(&c->client_list, &dm_bufio_all_clients);
1615 __cache_size_refresh();
1616 mutex_unlock(&dm_bufio_clients_lock);
1617
1618 c->shrinker.count_objects = dm_bufio_shrink_count;
1619 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1620 c->shrinker.seeks = 1;
1621 c->shrinker.batch = 0;
1622 register_shrinker(&c->shrinker);
1623
1624 return c;
1625
1626bad_buffer:
1627bad_cache:
1628 while (!list_empty(&c->reserved_buffers)) {
1629 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1630 struct dm_buffer, lru_list);
1631 list_del(&b->lru_list);
1632 free_buffer(b);
1633 }
1634 dm_io_client_destroy(c->dm_io);
1635bad_dm_io:
1636 vfree(c->cache_hash);
1637bad_hash:
1638 kfree(c);
1639bad_client:
1640 return ERR_PTR(r);
1641}
1642EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1643
1644/*
1645 * Free the buffering interface.
1646 * It is required that there are no references on any buffers.
1647 */
1648void dm_bufio_client_destroy(struct dm_bufio_client *c)
1649{
1650 unsigned i;
1651
1652 drop_buffers(c);
1653
1654 unregister_shrinker(&c->shrinker);
1655
1656 mutex_lock(&dm_bufio_clients_lock);
1657
1658 list_del(&c->client_list);
1659 dm_bufio_client_count--;
1660 __cache_size_refresh();
1661
1662 mutex_unlock(&dm_bufio_clients_lock);
1663
1664 for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1665 BUG_ON(!hlist_empty(&c->cache_hash[i]));
1666
1667 BUG_ON(c->need_reserved_buffers);
1668
1669 while (!list_empty(&c->reserved_buffers)) {
1670 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1671 struct dm_buffer, lru_list);
1672 list_del(&b->lru_list);
1673 free_buffer(b);
1674 }
1675
1676 for (i = 0; i < LIST_SIZE; i++)
1677 if (c->n_buffers[i])
1678 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1679
1680 for (i = 0; i < LIST_SIZE; i++)
1681 BUG_ON(c->n_buffers[i]);
1682
1683 dm_io_client_destroy(c->dm_io);
1684 vfree(c->cache_hash);
1685 kfree(c);
1686}
1687EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1688
1689static void cleanup_old_buffers(void)
1690{
 1691 unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age);
1692 struct dm_bufio_client *c;
1693
1694 if (max_age > ULONG_MAX / HZ)
1695 max_age = ULONG_MAX / HZ;
1696
1697 mutex_lock(&dm_bufio_clients_lock);
1698 list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
1699 if (!dm_bufio_trylock(c))
1700 continue;
1701
1702 while (!list_empty(&c->lru[LIST_CLEAN])) {
1703 struct dm_buffer *b;
1704 b = list_entry(c->lru[LIST_CLEAN].prev,
1705 struct dm_buffer, lru_list);
 1706 if (!__cleanup_old_buffer(b, 0, max_age * HZ))
1707 break;
1708 dm_bufio_cond_resched();
1709 }
1710
1711 dm_bufio_unlock(c);
1712 dm_bufio_cond_resched();
1713 }
1714 mutex_unlock(&dm_bufio_clients_lock);
1715}
1716
1717static struct workqueue_struct *dm_bufio_wq;
1718static struct delayed_work dm_bufio_work;
1719
1720static void work_fn(struct work_struct *w)
1721{
1722 cleanup_old_buffers();
1723
1724 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1725 DM_BUFIO_WORK_TIMER_SECS * HZ);
1726}
1727
1728/*----------------------------------------------------------------
1729 * Module setup
1730 *--------------------------------------------------------------*/
1731
1732/*
1733 * This is called only once for the whole dm_bufio module.
1734 * It initializes memory limit.
1735 */
1736static int __init dm_bufio_init(void)
1737{
1738 __u64 mem;
1739
1740 dm_bufio_allocated_kmem_cache = 0;
1741 dm_bufio_allocated_get_free_pages = 0;
1742 dm_bufio_allocated_vmalloc = 0;
1743 dm_bufio_current_allocated = 0;
1744
1745 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1746 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1747
1748 mem = (__u64)((totalram_pages - totalhigh_pages) *
1749 DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1750
1751 if (mem > ULONG_MAX)
1752 mem = ULONG_MAX;
1753
1754#ifdef CONFIG_MMU
1755 /*
1756 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1757 * in fs/proc/internal.h
1758 */
1759 if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1760 mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
1761#endif
1762
1763 dm_bufio_default_cache_size = mem;
1764
1765 mutex_lock(&dm_bufio_clients_lock);
1766 __cache_size_refresh();
1767 mutex_unlock(&dm_bufio_clients_lock);
1768
1769 dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
1770 if (!dm_bufio_wq)
1771 return -ENOMEM;
1772
1773 INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1774 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1775 DM_BUFIO_WORK_TIMER_SECS * HZ);
1776
1777 return 0;
1778}
1779
1780/*
1781 * This is called once when unloading the dm_bufio module.
1782 */
1783static void __exit dm_bufio_exit(void)
1784{
1785 int bug = 0;
1786 int i;
1787
1788 cancel_delayed_work_sync(&dm_bufio_work);
1789 destroy_workqueue(dm_bufio_wq);
1790
1791 for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
1792 struct kmem_cache *kc = dm_bufio_caches[i];
1793
1794 if (kc)
1795 kmem_cache_destroy(kc);
1796 }
1797
1798 for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1799 kfree(dm_bufio_cache_names[i]);
1800
1801 if (dm_bufio_client_count) {
1802 DMCRIT("%s: dm_bufio_client_count leaked: %d",
1803 __func__, dm_bufio_client_count);
1804 bug = 1;
1805 }
1806
1807 if (dm_bufio_current_allocated) {
1808 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1809 __func__, dm_bufio_current_allocated);
1810 bug = 1;
1811 }
1812
1813 if (dm_bufio_allocated_get_free_pages) {
1814 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1815 __func__, dm_bufio_allocated_get_free_pages);
1816 bug = 1;
1817 }
1818
1819 if (dm_bufio_allocated_vmalloc) {
1820 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1821 __func__, dm_bufio_allocated_vmalloc);
1822 bug = 1;
1823 }
1824
1825 if (bug)
1826 BUG();
1827}
1828
1829module_init(dm_bufio_init)
1830module_exit(dm_bufio_exit)
1831
1832module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1833MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1834
1835module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1836MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1837
1838module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1839MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1840
1841module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1842MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1843
1844module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1845MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1846
1847module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1848MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1849
1850module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1851MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
1852
1853MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1854MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1855MODULE_LICENSE("GPL");