1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2018 Red Hat. All rights reserved.
5 * This file is released under the GPL.
8 #include <linux/device-mapper.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/vmalloc.h>
12 #include <linux/kthread.h>
13 #include <linux/dm-io.h>
14 #include <linux/dm-kcopyd.h>
15 #include <linux/dax.h>
16 #include <linux/pfn_t.h>
17 #include <linux/libnvdimm.h>
18 #include <linux/delay.h>
19 #include "dm-io-tracker.h"
21 #define DM_MSG_PREFIX "writecache"
23 #define HIGH_WATERMARK 50
24 #define LOW_WATERMARK 45
25 #define MAX_WRITEBACK_JOBS 0
26 #define ENDIO_LATENCY 16
27 #define WRITEBACK_LATENCY 64
28 #define AUTOCOMMIT_BLOCKS_SSD 65536
29 #define AUTOCOMMIT_BLOCKS_PMEM 64
30 #define AUTOCOMMIT_MSEC 1000
31 #define MAX_AGE_DIV 16
32 #define MAX_AGE_UNSPECIFIED -1UL
33 #define PAUSE_WRITEBACK (HZ * 3)
35 #define BITMAP_GRANULARITY 65536
36 #if BITMAP_GRANULARITY < PAGE_SIZE
37 #undef BITMAP_GRANULARITY
38 #define BITMAP_GRANULARITY PAGE_SIZE
41 #if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_FS_DAX)
42 #define DM_WRITECACHE_HAS_PMEM
45 #ifdef DM_WRITECACHE_HAS_PMEM
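/*
 * Note on pmem_assign() (added description, not from the original source):
 * on pmem builds it stores through memcpy_flushcache() so metadata updates
 * reach persistent memory; in SSD-only builds it degenerates to a plain
 * assignment and the dirty bitmap below tracks which metadata regions still
 * have to be written out.
 */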
46 #define pmem_assign(dest, src) \
48 typeof(dest) uniq = (src); \
49 memcpy_flushcache(&(dest), &uniq, sizeof(dest)); \
52 #define pmem_assign(dest, src) ((dest) = (src))
55 #if IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) && defined(DM_WRITECACHE_HAS_PMEM)
56 #define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
59 #define MEMORY_SUPERBLOCK_MAGIC 0x23489321
60 #define MEMORY_SUPERBLOCK_VERSION 1
62 struct wc_memory_entry {
63 __le64 original_sector;
67 struct wc_memory_superblock {
79 struct wc_memory_entry entries[];
83 struct rb_node rb_node;
85 unsigned short wc_list_contiguous;
86 bool write_in_progress
87 #if BITS_PER_LONG == 64
92 #if BITS_PER_LONG == 64
97 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
98 uint64_t original_sector;
103 #ifdef DM_WRITECACHE_HAS_PMEM
104 #define WC_MODE_PMEM(wc) ((wc)->pmem_mode)
105 #define WC_MODE_FUA(wc) ((wc)->writeback_fua)
107 #define WC_MODE_PMEM(wc) false
108 #define WC_MODE_FUA(wc) false
110 #define WC_MODE_SORT_FREELIST(wc) (!WC_MODE_PMEM(wc))
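/*
 * Per-target state (summary added for readability): the rb-tree and LRU of
 * cached blocks, the freelist (kept as a sorted rb-tree in SSD mode),
 * watermarks and timers that drive writeback and autocommit, the worker
 * threads, and the statistics reported by the status line.
 */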
112 struct dm_writecache {
114 struct list_head lru;
116 struct list_head freelist;
118 struct rb_root freetree;
119 struct wc_entry *current_free;
124 size_t freelist_size;
125 size_t writeback_size;
126 size_t freelist_high_watermark;
127 size_t freelist_low_watermark;
128 unsigned long max_age;
131 unsigned uncommitted_blocks;
132 unsigned autocommit_blocks;
133 unsigned max_writeback_jobs;
137 unsigned long autocommit_jiffies;
138 struct timer_list autocommit_timer;
139 struct wait_queue_head freelist_wait;
141 struct timer_list max_age_timer;
143 atomic_t bio_in_progress[2];
144 struct wait_queue_head bio_in_progress_wait[2];
146 struct dm_target *ti;
148 struct dm_dev *ssd_dev;
149 sector_t start_sector;
151 uint64_t memory_map_size;
152 size_t metadata_sectors;
155 sector_t data_device_sectors;
157 struct wc_entry *entries;
159 unsigned char block_size_bits;
162 bool writeback_fua:1;
164 bool overwrote_committed:1;
165 bool memory_vmapped:1;
167 bool start_sector_set:1;
168 bool high_wm_percent_set:1;
169 bool low_wm_percent_set:1;
170 bool max_writeback_jobs_set:1;
171 bool autocommit_blocks_set:1;
172 bool autocommit_time_set:1;
174 bool writeback_fua_set:1;
175 bool flush_on_suspend:1;
178 bool metadata_only:1;
181 unsigned high_wm_percent_value;
182 unsigned low_wm_percent_value;
183 unsigned autocommit_time_value;
184 unsigned max_age_value;
185 unsigned pause_value;
187 unsigned writeback_all;
188 struct workqueue_struct *writeback_wq;
189 struct work_struct writeback_work;
190 struct work_struct flush_work;
192 struct dm_io_tracker iot;
194 struct dm_io_client *dm_io;
196 raw_spinlock_t endio_list_lock;
197 struct list_head endio_list;
198 struct task_struct *endio_thread;
200 struct task_struct *flush_thread;
201 struct bio_list flush_list;
203 struct dm_kcopyd_client *dm_kcopyd;
204 unsigned long *dirty_bitmap;
205 unsigned dirty_bitmap_size;
207 struct bio_set bio_set;
211 unsigned long long reads;
212 unsigned long long read_hits;
213 unsigned long long writes;
214 unsigned long long write_hits_uncommitted;
215 unsigned long long write_hits_committed;
216 unsigned long long writes_around;
217 unsigned long long writes_allocate;
218 unsigned long long writes_blocked_on_freelist;
219 unsigned long long flushes;
220 unsigned long long discards;
224 #define WB_LIST_INLINE 16
226 struct writeback_struct {
227 struct list_head endio_entry;
228 struct dm_writecache *wc;
229 struct wc_entry **wc_list;
231 struct wc_entry *wc_list_inline[WB_LIST_INLINE];
236 struct list_head endio_entry;
237 struct dm_writecache *wc;
243 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
244 "A percentage of time allocated for data copying");
246 static void wc_lock(struct dm_writecache *wc)
248 mutex_lock(&wc->lock);
251 static void wc_unlock(struct dm_writecache *wc)
253 mutex_unlock(&wc->lock);
256 #ifdef DM_WRITECACHE_HAS_PMEM
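/*
 * Map the cache device's persistent memory into the kernel address space.
 * dax_direct_access() is asked for the whole range; if it cannot hand back
 * one contiguous mapping, the pages are collected into an array and vmap()ed
 * instead (memory_vmapped is set so the release path knows to vunmap()).
 */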
257 static int persistent_memory_claim(struct dm_writecache *wc)
267 wc->memory_vmapped = false;
269 s = wc->memory_map_size;
275 if (p != s >> PAGE_SHIFT) {
280 offset = get_start_sect(wc->ssd_dev->bdev);
281 if (offset & (PAGE_SIZE / 512 - 1)) {
285 offset >>= PAGE_SHIFT - 9;
287 id = dax_read_lock();
289 da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, DAX_ACCESS,
290 &wc->memory_map, &pfn);
292 wc->memory_map = NULL;
296 if (!pfn_t_has_page(pfn)) {
297 wc->memory_map = NULL;
303 wc->memory_map = NULL;
304 pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
312 daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i,
313 p - i, DAX_ACCESS, NULL, &pfn);
315 r = daa ? daa : -EINVAL;
318 if (!pfn_t_has_page(pfn)) {
322 while (daa-- && i < p) {
323 pages[i++] = pfn_t_to_page(pfn);
329 wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
330 if (!wc->memory_map) {
335 wc->memory_vmapped = true;
340 wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
341 wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;
352 static int persistent_memory_claim(struct dm_writecache *wc)
358 static void persistent_memory_release(struct dm_writecache *wc)
360 if (wc->memory_vmapped)
361 vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
364 static struct page *persistent_memory_page(void *addr)
366 if (is_vmalloc_addr(addr))
367 return vmalloc_to_page(addr);
369 return virt_to_page(addr);
372 static unsigned persistent_memory_page_offset(void *addr)
374 return (unsigned long)addr & (PAGE_SIZE - 1);
377 static void persistent_memory_flush_cache(void *ptr, size_t size)
379 if (is_vmalloc_addr(ptr))
380 flush_kernel_vmap_range(ptr, size);
383 static void persistent_memory_invalidate_cache(void *ptr, size_t size)
385 if (is_vmalloc_addr(ptr))
386 invalidate_kernel_vmap_range(ptr, size);
389 static struct wc_memory_superblock *sb(struct dm_writecache *wc)
391 return wc->memory_map;
394 static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
396 return &sb(wc)->entries[e->index];
399 static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
401 return (char *)wc->block_start + (e->index << wc->block_size_bits);
404 static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
406 return wc->start_sector + wc->metadata_sectors +
407 ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
410 static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
412 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
413 return e->original_sector;
415 return le64_to_cpu(memory_entry(wc, e)->original_sector);
419 static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
421 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
424 return le64_to_cpu(memory_entry(wc, e)->seq_count);
428 static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
430 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
433 pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
436 static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
437 uint64_t original_sector, uint64_t seq_count)
439 struct wc_memory_entry me;
440 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
441 e->original_sector = original_sector;
442 e->seq_count = seq_count;
444 me.original_sector = cpu_to_le64(original_sector);
445 me.seq_count = cpu_to_le64(seq_count);
446 pmem_assign(*memory_entry(wc, e), me);
449 #define writecache_error(wc, err, msg, arg...) \
451 if (!cmpxchg(&(wc)->error, 0, err)) \
453 wake_up(&(wc)->freelist_wait); \
456 #define writecache_has_error(wc) (unlikely(READ_ONCE((wc)->error)))
458 static void writecache_flush_all_metadata(struct dm_writecache *wc)
460 if (!WC_MODE_PMEM(wc))
461 memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
464 static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
466 if (!WC_MODE_PMEM(wc))
467 __set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
471 static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);
474 struct dm_writecache *wc;
479 static void writecache_notify_io(unsigned long error, void *context)
481 struct io_notify *endio = context;
483 if (unlikely(error != 0))
484 writecache_error(endio->wc, -EIO, "error writing metadata");
485 BUG_ON(atomic_read(&endio->count) <= 0);
486 if (atomic_dec_and_test(&endio->count))
490 static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
492 wait_event(wc->bio_in_progress_wait[direction],
493 !atomic_read(&wc->bio_in_progress[direction]));
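/*
 * SSD-mode metadata commit: walk the dirty bitmap, write every dirty
 * BITMAP_GRANULARITY-sized region of the in-core metadata to the cache
 * device with async dm-io, wait for all of them, then optionally wait for
 * in-flight data writes and issue a disk flush before clearing the bitmap.
 */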
496 static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
498 struct dm_io_region region;
499 struct dm_io_request req;
500 struct io_notify endio = {
502 COMPLETION_INITIALIZER_ONSTACK(endio.c),
505 unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
510 i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
511 if (unlikely(i == bitmap_bits))
513 j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);
515 region.bdev = wc->ssd_dev->bdev;
516 region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
517 region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
519 if (unlikely(region.sector >= wc->metadata_sectors))
521 if (unlikely(region.sector + region.count > wc->metadata_sectors))
522 region.count = wc->metadata_sectors - region.sector;
524 region.sector += wc->start_sector;
525 atomic_inc(&endio.count);
526 req.bi_op = REQ_OP_WRITE;
527 req.bi_op_flags = REQ_SYNC;
528 req.mem.type = DM_IO_VMA;
529 req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
530 req.client = wc->dm_io;
531 req.notify.fn = writecache_notify_io;
532 req.notify.context = &endio;
534 /* writing via async dm-io (implied by notify.fn above) won't return an error */
535 (void) dm_io(&req, 1, &region, NULL);
539 writecache_notify_io(0, &endio);
540 wait_for_completion_io(&endio.c);
543 writecache_wait_for_ios(wc, WRITE);
545 writecache_disk_flush(wc, wc->ssd_dev);
547 memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
550 static void ssd_commit_superblock(struct dm_writecache *wc)
553 struct dm_io_region region;
554 struct dm_io_request req;
556 region.bdev = wc->ssd_dev->bdev;
558 region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT;
560 if (unlikely(region.sector + region.count > wc->metadata_sectors))
561 region.count = wc->metadata_sectors - region.sector;
563 region.sector += wc->start_sector;
565 req.bi_op = REQ_OP_WRITE;
566 req.bi_op_flags = REQ_SYNC | REQ_FUA;
567 req.mem.type = DM_IO_VMA;
568 req.mem.ptr.vma = (char *)wc->memory_map;
569 req.client = wc->dm_io;
570 req.notify.fn = NULL;
571 req.notify.context = NULL;
573 r = dm_io(&req, 1, &region, NULL);
575 writecache_error(wc, r, "error writing superblock");
578 static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
580 if (WC_MODE_PMEM(wc))
583 ssd_commit_flushed(wc, wait_for_ios);
586 static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
589 struct dm_io_region region;
590 struct dm_io_request req;
592 region.bdev = dev->bdev;
595 req.bi_op = REQ_OP_WRITE;
596 req.bi_op_flags = REQ_PREFLUSH;
597 req.mem.type = DM_IO_KMEM;
598 req.mem.ptr.addr = NULL;
599 req.client = wc->dm_io;
600 req.notify.fn = NULL;
602 r = dm_io(&req, 1, &region, NULL);
604 writecache_error(wc, r, "error flushing metadata: %d", r);
607 #define WFE_RETURN_FOLLOWING 1
608 #define WFE_LOWEST_SEQ 2
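/*
 * Look up a cached block by its original sector in the rb-tree.  With
 * WFE_RETURN_FOLLOWING a miss returns the next higher entry instead of NULL;
 * with WFE_LOWEST_SEQ, when several entries share the same sector, the walk
 * continues toward the entry with the lowest sequence count (the older copy).
 */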
610 static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
611 uint64_t block, int flags)
614 struct rb_node *node = wc->tree.rb_node;
620 e = container_of(node, struct wc_entry, rb_node);
621 if (read_original_sector(wc, e) == block)
624 node = (read_original_sector(wc, e) >= block ?
625 e->rb_node.rb_left : e->rb_node.rb_right);
626 if (unlikely(!node)) {
627 if (!(flags & WFE_RETURN_FOLLOWING))
629 if (read_original_sector(wc, e) >= block) {
632 node = rb_next(&e->rb_node);
635 e = container_of(node, struct wc_entry, rb_node);
643 if (flags & WFE_LOWEST_SEQ)
644 node = rb_prev(&e->rb_node);
646 node = rb_next(&e->rb_node);
649 e2 = container_of(node, struct wc_entry, rb_node);
650 if (read_original_sector(wc, e2) != block)
656 static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
659 struct rb_node **node = &wc->tree.rb_node, *parent = NULL;
662 e = container_of(*node, struct wc_entry, rb_node);
663 parent = &e->rb_node;
664 if (read_original_sector(wc, e) > read_original_sector(wc, ins))
665 node = &parent->rb_left;
667 node = &parent->rb_right;
669 rb_link_node(&ins->rb_node, parent, node);
670 rb_insert_color(&ins->rb_node, &wc->tree);
671 list_add(&ins->lru, &wc->lru);
675 static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
678 rb_erase(&e->rb_node, &wc->tree);
681 static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
683 if (WC_MODE_SORT_FREELIST(wc)) {
684 struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
685 if (unlikely(!*node))
686 wc->current_free = e;
689 if (&e->rb_node < *node)
690 node = &parent->rb_left;
692 node = &parent->rb_right;
694 rb_link_node(&e->rb_node, parent, node);
695 rb_insert_color(&e->rb_node, &wc->freetree);
697 list_add_tail(&e->lru, &wc->freelist);
702 static inline void writecache_verify_watermark(struct dm_writecache *wc)
704 if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
705 queue_work(wc->writeback_wq, &wc->writeback_work);
708 static void writecache_max_age_timer(struct timer_list *t)
710 struct dm_writecache *wc = from_timer(wc, t, max_age_timer);
712 if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
713 queue_work(wc->writeback_wq, &wc->writeback_work);
714 mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
718 static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
722 if (WC_MODE_SORT_FREELIST(wc)) {
723 struct rb_node *next;
724 if (unlikely(!wc->current_free))
726 e = wc->current_free;
727 if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
729 next = rb_next(&e->rb_node);
730 rb_erase(&e->rb_node, &wc->freetree);
732 next = rb_first(&wc->freetree);
733 wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
735 if (unlikely(list_empty(&wc->freelist)))
737 e = container_of(wc->freelist.next, struct wc_entry, lru);
738 if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
744 writecache_verify_watermark(wc);
749 static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
751 writecache_unlink(wc, e);
752 writecache_add_to_freelist(wc, e);
753 clear_seq_count(wc, e);
754 writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
755 if (unlikely(waitqueue_active(&wc->freelist_wait)))
756 wake_up(&wc->freelist_wait);
759 static void writecache_wait_on_freelist(struct dm_writecache *wc)
763 prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
766 finish_wait(&wc->freelist_wait, &wait);
770 static void writecache_poison_lists(struct dm_writecache *wc)
773 * Catch incorrect access to these values while the device is suspended.
775 memset(&wc->tree, -1, sizeof wc->tree);
776 wc->lru.next = LIST_POISON1;
777 wc->lru.prev = LIST_POISON2;
778 wc->freelist.next = LIST_POISON1;
779 wc->freelist.prev = LIST_POISON2;
782 static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
784 writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
785 if (WC_MODE_PMEM(wc))
786 writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
789 static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
791 return read_seq_count(wc, e) < wc->seq_count;
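/*
 * Commit everything that is currently uncommitted: flush the metadata (and,
 * on pmem, the data) of every not-yet-committed entry at the head of the LRU,
 * bump the superblock sequence count, and then free any older duplicate
 * entries that the new copies supersede.
 */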
794 static void writecache_flush(struct dm_writecache *wc)
796 struct wc_entry *e, *e2;
797 bool need_flush_after_free;
799 wc->uncommitted_blocks = 0;
800 del_timer(&wc->autocommit_timer);
802 if (list_empty(&wc->lru))
805 e = container_of(wc->lru.next, struct wc_entry, lru);
806 if (writecache_entry_is_committed(wc, e)) {
807 if (wc->overwrote_committed) {
808 writecache_wait_for_ios(wc, WRITE);
809 writecache_disk_flush(wc, wc->ssd_dev);
810 wc->overwrote_committed = false;
815 writecache_flush_entry(wc, e);
816 if (unlikely(e->lru.next == &wc->lru))
818 e2 = container_of(e->lru.next, struct wc_entry, lru);
819 if (writecache_entry_is_committed(wc, e2))
824 writecache_commit_flushed(wc, true);
827 pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
828 if (WC_MODE_PMEM(wc))
829 writecache_commit_flushed(wc, false);
831 ssd_commit_superblock(wc);
833 wc->overwrote_committed = false;
835 need_flush_after_free = false;
837 /* Free another committed entry with lower seq-count */
838 struct rb_node *rb_node = rb_prev(&e->rb_node);
841 e2 = container_of(rb_node, struct wc_entry, rb_node);
842 if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
843 likely(!e2->write_in_progress)) {
844 writecache_free_entry(wc, e2);
845 need_flush_after_free = true;
848 if (unlikely(e->lru.prev == &wc->lru))
850 e = container_of(e->lru.prev, struct wc_entry, lru);
854 if (need_flush_after_free)
855 writecache_commit_flushed(wc, false);
858 static void writecache_flush_work(struct work_struct *work)
860 struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);
863 writecache_flush(wc);
867 static void writecache_autocommit_timer(struct timer_list *t)
869 struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
870 if (!writecache_has_error(wc))
871 queue_work(wc->writeback_wq, &wc->flush_work);
874 static void writecache_schedule_autocommit(struct dm_writecache *wc)
876 if (!timer_pending(&wc->autocommit_timer))
877 mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
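/*
 * Drop all cached entries whose original sectors fall inside [start, end).
 * In SSD mode the first free waits for in-flight reads and writes so a bio
 * still targeting the cache device cannot race with the freed block.
 */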
880 static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
883 bool discarded_something = false;
885 e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
889 while (read_original_sector(wc, e) < end) {
890 struct rb_node *node = rb_next(&e->rb_node);
892 if (likely(!e->write_in_progress)) {
893 if (!discarded_something) {
894 if (!WC_MODE_PMEM(wc)) {
895 writecache_wait_for_ios(wc, READ);
896 writecache_wait_for_ios(wc, WRITE);
898 discarded_something = true;
900 if (!writecache_entry_is_committed(wc, e))
901 wc->uncommitted_blocks--;
902 writecache_free_entry(wc, e);
908 e = container_of(node, struct wc_entry, rb_node);
911 if (discarded_something)
912 writecache_commit_flushed(wc, false);
915 static bool writecache_wait_for_writeback(struct dm_writecache *wc)
917 if (wc->writeback_size) {
918 writecache_wait_on_freelist(wc);
924 static void writecache_suspend(struct dm_target *ti)
926 struct dm_writecache *wc = ti->private;
927 bool flush_on_suspend;
929 del_timer_sync(&wc->autocommit_timer);
930 del_timer_sync(&wc->max_age_timer);
933 writecache_flush(wc);
934 flush_on_suspend = wc->flush_on_suspend;
935 if (flush_on_suspend) {
936 wc->flush_on_suspend = false;
938 queue_work(wc->writeback_wq, &wc->writeback_work);
942 drain_workqueue(wc->writeback_wq);
945 if (flush_on_suspend)
947 while (writecache_wait_for_writeback(wc));
949 if (WC_MODE_PMEM(wc))
950 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
952 writecache_poison_lists(wc);
957 static int writecache_alloc_entries(struct dm_writecache *wc)
963 wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
966 for (b = 0; b < wc->n_blocks; b++) {
967 struct wc_entry *e = &wc->entries[b];
969 e->write_in_progress = false;
976 static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
978 struct dm_io_region region;
979 struct dm_io_request req;
981 region.bdev = wc->ssd_dev->bdev;
982 region.sector = wc->start_sector;
983 region.count = n_sectors;
984 req.bi_op = REQ_OP_READ;
985 req.bi_op_flags = REQ_SYNC;
986 req.mem.type = DM_IO_VMA;
987 req.mem.ptr.vma = (char *)wc->memory_map;
988 req.client = wc->dm_io;
989 req.notify.fn = NULL;
991 return dm_io(&req, 1, &region, NULL);
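/*
 * Resume: re-read the metadata (SSD mode) or invalidate the CPU cache over
 * it (pmem), then rebuild the in-core rb-tree and freelist from the stored
 * entries, clearing uncommitted entries and resolving duplicated sectors by
 * keeping the copy with the higher sequence count.
 */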
994 static void writecache_resume(struct dm_target *ti)
996 struct dm_writecache *wc = ti->private;
998 bool need_flush = false;
1004 wc->data_device_sectors = bdev_nr_sectors(wc->dev->bdev);
1006 if (WC_MODE_PMEM(wc)) {
1007 persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
1009 r = writecache_read_metadata(wc, wc->metadata_sectors);
1011 size_t sb_entries_offset;
1012 writecache_error(wc, r, "unable to read metadata: %d", r);
1013 sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
1014 memset((char *)wc->memory_map + sb_entries_offset, -1,
1015 (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
1020 INIT_LIST_HEAD(&wc->lru);
1021 if (WC_MODE_SORT_FREELIST(wc)) {
1022 wc->freetree = RB_ROOT;
1023 wc->current_free = NULL;
1025 INIT_LIST_HEAD(&wc->freelist);
1027 wc->freelist_size = 0;
1029 r = copy_mc_to_kernel(&sb_seq_count, &sb(wc)->seq_count,
1032 writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
1033 sb_seq_count = cpu_to_le64(0);
1035 wc->seq_count = le64_to_cpu(sb_seq_count);
1037 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
1038 for (b = 0; b < wc->n_blocks; b++) {
1039 struct wc_entry *e = &wc->entries[b];
1040 struct wc_memory_entry wme;
1041 if (writecache_has_error(wc)) {
1042 e->original_sector = -1;
1046 r = copy_mc_to_kernel(&wme, memory_entry(wc, e),
1047 sizeof(struct wc_memory_entry));
1049 writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
1050 (unsigned long)b, r);
1051 e->original_sector = -1;
1054 e->original_sector = le64_to_cpu(wme.original_sector);
1055 e->seq_count = le64_to_cpu(wme.seq_count);
1060 for (b = 0; b < wc->n_blocks; b++) {
1061 struct wc_entry *e = &wc->entries[b];
1062 if (!writecache_entry_is_committed(wc, e)) {
1063 if (read_seq_count(wc, e) != -1) {
1065 clear_seq_count(wc, e);
1068 writecache_add_to_freelist(wc, e);
1070 struct wc_entry *old;
1072 old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
1074 writecache_insert_entry(wc, e);
1076 if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
1077 writecache_error(wc, -EINVAL,
1078 "two identical entries, position %llu, sector %llu, sequence %llu",
1079 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
1080 (unsigned long long)read_seq_count(wc, e));
1082 if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
1085 writecache_free_entry(wc, old);
1086 writecache_insert_entry(wc, e);
1095 writecache_flush_all_metadata(wc);
1096 writecache_commit_flushed(wc, false);
1099 writecache_verify_watermark(wc);
1101 if (wc->max_age != MAX_AGE_UNSPECIFIED)
1102 mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
1107 static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
1113 if (dm_suspended(wc->ti)) {
1117 if (writecache_has_error(wc)) {
1122 writecache_flush(wc);
1123 wc->writeback_all++;
1124 queue_work(wc->writeback_wq, &wc->writeback_work);
1127 flush_workqueue(wc->writeback_wq);
1130 wc->writeback_all--;
1131 if (writecache_has_error(wc)) {
1140 static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
1146 wc->flush_on_suspend = true;
1152 static void activate_cleaner(struct dm_writecache *wc)
1154 wc->flush_on_suspend = true;
1156 wc->freelist_high_watermark = wc->n_blocks;
1157 wc->freelist_low_watermark = wc->n_blocks;
1160 static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
1166 activate_cleaner(wc);
1167 if (!dm_suspended(wc->ti))
1168 writecache_verify_watermark(wc);
1174 static int process_clear_stats_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
1180 memset(&wc->stats, 0, sizeof wc->stats);
1186 static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
1187 char *result, unsigned maxlen)
1190 struct dm_writecache *wc = ti->private;
1192 if (!strcasecmp(argv[0], "flush"))
1193 r = process_flush_mesg(argc, argv, wc);
1194 else if (!strcasecmp(argv[0], "flush_on_suspend"))
1195 r = process_flush_on_suspend_mesg(argc, argv, wc);
1196 else if (!strcasecmp(argv[0], "cleaner"))
1197 r = process_cleaner_mesg(argc, argv, wc);
1198 else if (!strcasecmp(argv[0], "clear_stats"))
1199 r = process_clear_stats_mesg(argc, argv, wc);
1201 DMERR("unrecognised message received: %s", argv[0]);
1206 static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
1209 * clflushopt performs better with block size 1024, 2048, 4096
1210 * non-temporal stores perform better with block size 512
1212 * block size 512 1024 2048 4096
1213 * movnti 496 MB/s 642 MB/s 725 MB/s 744 MB/s
1214 * clflushopt 373 MB/s 688 MB/s 1.1 GB/s 1.2 GB/s
1216 * We see that movnti performs better for 512-byte blocks, and
1217 * clflushopt performs better for 1024-byte and larger blocks. So, we
1218 * prefer clflushopt for sizes >= 768.
1220 * NOTE: this happens to be the case now (with dm-writecache's single
1221 * threaded model) but re-evaluate this once memcpy_flushcache() is
1222 * enabled to use movdir64b which might invalidate this performance
1223 * advantage seen with cache-allocating-writes plus flushing.
1226 if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
1227 likely(boot_cpu_data.x86_clflush_size == 64) &&
1228 likely(size >= 768)) {
1230 memcpy((void *)dest, (void *)source, 64);
1231 clflushopt((void *)dest);
1235 } while (size >= 64);
1239 memcpy_flushcache(dest, source, size);
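/*
 * Copy one cache block between the bio's pages and persistent memory.  Reads
 * may go through copy_mc_to_kernel() so a hardware memory error is turned
 * into BLK_STS_IOERR instead of a machine check; writes use the flush-cache
 * optimized copy above.
 */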
1242 static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
1246 int rw = bio_data_dir(bio);
1247 unsigned remaining_size = wc->block_size;
1250 struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
1251 buf = bvec_kmap_local(&bv);
1253 if (unlikely(size > remaining_size))
1254 size = remaining_size;
1258 r = copy_mc_to_kernel(buf, data, size);
1259 flush_dcache_page(bio_page(bio));
1261 writecache_error(wc, r, "hardware memory error when reading data: %d", r);
1262 bio->bi_status = BLK_STS_IOERR;
1265 flush_dcache_page(bio_page(bio));
1266 memcpy_flushcache_optimized(data, buf, size);
1271 data = (char *)data + size;
1272 remaining_size -= size;
1273 bio_advance(bio, size);
1274 } while (unlikely(remaining_size));
1277 static int writecache_flush_thread(void *data)
1279 struct dm_writecache *wc = data;
1285 bio = bio_list_pop(&wc->flush_list);
1287 set_current_state(TASK_INTERRUPTIBLE);
1290 if (unlikely(kthread_should_stop())) {
1291 set_current_state(TASK_RUNNING);
1299 if (bio_op(bio) == REQ_OP_DISCARD) {
1300 writecache_discard(wc, bio->bi_iter.bi_sector,
1301 bio_end_sector(bio));
1303 bio_set_dev(bio, wc->dev->bdev);
1304 submit_bio_noacct(bio);
1306 writecache_flush(wc);
1308 if (writecache_has_error(wc))
1309 bio->bi_status = BLK_STS_IOERR;
1317 static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
1319 if (bio_list_empty(&wc->flush_list))
1320 wake_up_process(wc->flush_thread);
1321 bio_list_add(&wc->flush_list, bio);
1327 WC_MAP_REMAP_ORIGIN,
1332 static enum wc_map_op writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio,
1336 sector_t next_boundary =
1337 read_original_sector(wc, e) - bio->bi_iter.bi_sector;
1338 if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT)
1339 dm_accept_partial_bio(bio, next_boundary);
1342 return WC_MAP_REMAP_ORIGIN;
1345 static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio)
1347 enum wc_map_op map_op;
1352 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1353 if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
1354 wc->stats.read_hits++;
1355 if (WC_MODE_PMEM(wc)) {
1356 bio_copy_block(wc, bio, memory_data(wc, e));
1357 if (bio->bi_iter.bi_size)
1358 goto read_next_block;
1359 map_op = WC_MAP_SUBMIT;
1361 dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
1362 bio_set_dev(bio, wc->ssd_dev->bdev);
1363 bio->bi_iter.bi_sector = cache_sector(wc, e);
1364 if (!writecache_entry_is_committed(wc, e))
1365 writecache_wait_for_ios(wc, WRITE);
1366 map_op = WC_MAP_REMAP;
1369 map_op = writecache_map_remap_origin(wc, bio, e);
1375 static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
1376 struct wc_entry *e, bool search_used)
1378 unsigned bio_size = wc->block_size;
1379 sector_t start_cache_sec = cache_sector(wc, e);
1380 sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
1382 while (bio_size < bio->bi_iter.bi_size) {
1384 struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
1387 write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
1388 (bio_size >> SECTOR_SHIFT), wc->seq_count);
1389 writecache_insert_entry(wc, f);
1390 wc->uncommitted_blocks++;
1393 struct rb_node *next = rb_next(&e->rb_node);
1396 f = container_of(next, struct wc_entry, rb_node);
1399 if (read_original_sector(wc, f) !=
1400 read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
1402 if (unlikely(f->write_in_progress))
1404 if (writecache_entry_is_committed(wc, f))
1405 wc->overwrote_committed = true;
1408 bio_size += wc->block_size;
1409 current_cache_sec += wc->block_size >> SECTOR_SHIFT;
1412 bio_set_dev(bio, wc->ssd_dev->bdev);
1413 bio->bi_iter.bi_sector = start_cache_sec;
1414 dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
1416 if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
1417 wc->uncommitted_blocks = 0;
1418 queue_work(wc->writeback_wq, &wc->flush_work);
1420 writecache_schedule_autocommit(wc);
1423 return WC_MAP_REMAP;
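/*
 * Write path: an uncommitted cache hit is simply overwritten in place; a
 * committed hit or miss allocates a new entry from the freelist (blocking on
 * it when empty).  In SSD mode, when no entry is free the write may instead
 * be passed around the cache straight to the origin device.
 */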
1426 static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio)
1431 bool found_entry = false;
1432 bool search_used = false;
1434 if (writecache_has_error(wc))
1435 return WC_MAP_ERROR;
1436 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
1438 if (!writecache_entry_is_committed(wc, e)) {
1439 wc->stats.write_hits_uncommitted++;
1443 wc->stats.write_hits_committed++;
1444 if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
1445 wc->overwrote_committed = true;
1451 if (unlikely(wc->cleaner) ||
1452 (wc->metadata_only && !(bio->bi_opf & REQ_META)))
1455 e = writecache_pop_from_freelist(wc, (sector_t)-1);
1457 if (!WC_MODE_PMEM(wc) && !found_entry) {
1459 wc->stats.writes_around++;
1460 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1461 return writecache_map_remap_origin(wc, bio, e);
1463 wc->stats.writes_blocked_on_freelist++;
1464 writecache_wait_on_freelist(wc);
1467 write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
1468 writecache_insert_entry(wc, e);
1469 wc->uncommitted_blocks++;
1470 wc->stats.writes_allocate++;
1472 if (WC_MODE_PMEM(wc))
1473 bio_copy_block(wc, bio, memory_data(wc, e));
1475 return writecache_bio_copy_ssd(wc, bio, e, search_used);
1476 } while (bio->bi_iter.bi_size);
1478 if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks))
1479 writecache_flush(wc);
1481 writecache_schedule_autocommit(wc);
1483 return WC_MAP_SUBMIT;
1486 static enum wc_map_op writecache_map_flush(struct dm_writecache *wc, struct bio *bio)
1488 if (writecache_has_error(wc))
1489 return WC_MAP_ERROR;
1491 if (WC_MODE_PMEM(wc)) {
1492 wc->stats.flushes++;
1493 writecache_flush(wc);
1494 if (writecache_has_error(wc))
1495 return WC_MAP_ERROR;
1496 else if (unlikely(wc->cleaner) || unlikely(wc->metadata_only))
1497 return WC_MAP_REMAP_ORIGIN;
1498 return WC_MAP_SUBMIT;
1501 if (dm_bio_get_target_bio_nr(bio))
1502 return WC_MAP_REMAP_ORIGIN;
1503 wc->stats.flushes++;
1504 writecache_offload_bio(wc, bio);
1505 return WC_MAP_RETURN;
1508 static enum wc_map_op writecache_map_discard(struct dm_writecache *wc, struct bio *bio)
1510 wc->stats.discards++;
1512 if (writecache_has_error(wc))
1513 return WC_MAP_ERROR;
1515 if (WC_MODE_PMEM(wc)) {
1516 writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
1517 return WC_MAP_REMAP_ORIGIN;
1520 writecache_offload_bio(wc, bio);
1521 return WC_MAP_RETURN;
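/*
 * Top-level map function.  Flushes and discards are handled inline in pmem
 * mode and offloaded to the flush thread in SSD mode; everything else is
 * checked for block-size alignment and dispatched to the read or write path,
 * and the returned wc_map_op decides whether the bio is remapped or already
 * submitted.
 */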
1524 static int writecache_map(struct dm_target *ti, struct bio *bio)
1526 struct dm_writecache *wc = ti->private;
1527 enum wc_map_op map_op;
1529 bio->bi_private = NULL;
1533 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1534 map_op = writecache_map_flush(wc, bio);
1538 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1540 if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
1541 (wc->block_size / 512 - 1)) != 0)) {
1542 DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
1543 (unsigned long long)bio->bi_iter.bi_sector,
1544 bio->bi_iter.bi_size, wc->block_size);
1545 map_op = WC_MAP_ERROR;
1549 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
1550 map_op = writecache_map_discard(wc, bio);
1554 if (bio_data_dir(bio) == READ)
1555 map_op = writecache_map_read(wc, bio);
1557 map_op = writecache_map_write(wc, bio);
1560 case WC_MAP_REMAP_ORIGIN:
1561 if (likely(wc->pause != 0)) {
1562 if (bio_op(bio) == REQ_OP_WRITE) {
1563 dm_iot_io_begin(&wc->iot, 1);
1564 bio->bi_private = (void *)2;
1567 bio_set_dev(bio, wc->dev->bdev);
1569 return DM_MAPIO_REMAPPED;
1572 /* make sure that writecache_end_io decrements bio_in_progress: */
1573 bio->bi_private = (void *)1;
1574 atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
1576 return DM_MAPIO_REMAPPED;
1581 return DM_MAPIO_SUBMITTED;
1585 return DM_MAPIO_SUBMITTED;
1590 return DM_MAPIO_SUBMITTED;
1598 static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
1600 struct dm_writecache *wc = ti->private;
1602 if (bio->bi_private == (void *)1) {
1603 int dir = bio_data_dir(bio);
1604 if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
1605 if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
1606 wake_up(&wc->bio_in_progress_wait[dir]);
1607 } else if (bio->bi_private == (void *)2) {
1608 dm_iot_io_end(&wc->iot, 1);
1613 static int writecache_iterate_devices(struct dm_target *ti,
1614 iterate_devices_callout_fn fn, void *data)
1616 struct dm_writecache *wc = ti->private;
1618 return fn(ti, wc->dev, 0, ti->len, data);
1621 static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
1623 struct dm_writecache *wc = ti->private;
1625 if (limits->logical_block_size < wc->block_size)
1626 limits->logical_block_size = wc->block_size;
1628 if (limits->physical_block_size < wc->block_size)
1629 limits->physical_block_size = wc->block_size;
1631 if (limits->io_min < wc->block_size)
1632 limits->io_min = wc->block_size;
1636 static void writecache_writeback_endio(struct bio *bio)
1638 struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
1639 struct dm_writecache *wc = wb->wc;
1640 unsigned long flags;
1642 raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
1643 if (unlikely(list_empty(&wc->endio_list)))
1644 wake_up_process(wc->endio_thread);
1645 list_add_tail(&wb->endio_entry, &wc->endio_list);
1646 raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
1649 static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
1651 struct copy_struct *c = ptr;
1652 struct dm_writecache *wc = c->wc;
1654 c->error = likely(!(read_err | write_err)) ? 0 : -EIO;
1656 raw_spin_lock_irq(&wc->endio_list_lock);
1657 if (unlikely(list_empty(&wc->endio_list)))
1658 wake_up_process(wc->endio_thread);
1659 list_add_tail(&c->endio_entry, &wc->endio_list);
1660 raw_spin_unlock_irq(&wc->endio_list_lock);
1663 static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
1666 struct writeback_struct *wb;
1668 unsigned long n_walked = 0;
1671 wb = list_entry(list->next, struct writeback_struct, endio_entry);
1672 list_del(&wb->endio_entry);
1674 if (unlikely(wb->bio.bi_status != BLK_STS_OK))
1675 writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
1676 "write error %d", wb->bio.bi_status);
1680 BUG_ON(!e->write_in_progress);
1681 e->write_in_progress = false;
1682 INIT_LIST_HEAD(&e->lru);
1683 if (!writecache_has_error(wc))
1684 writecache_free_entry(wc, e);
1685 BUG_ON(!wc->writeback_size);
1686 wc->writeback_size--;
1688 if (unlikely(n_walked >= ENDIO_LATENCY)) {
1689 writecache_commit_flushed(wc, false);
1694 } while (++i < wb->wc_list_n);
1696 if (wb->wc_list != wb->wc_list_inline)
1699 } while (!list_empty(list));
1702 static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
1704 struct copy_struct *c;
1708 c = list_entry(list->next, struct copy_struct, endio_entry);
1709 list_del(&c->endio_entry);
1711 if (unlikely(c->error))
1712 writecache_error(wc, c->error, "copy error");
1716 BUG_ON(!e->write_in_progress);
1717 e->write_in_progress = false;
1718 INIT_LIST_HEAD(&e->lru);
1719 if (!writecache_has_error(wc))
1720 writecache_free_entry(wc, e);
1722 BUG_ON(!wc->writeback_size);
1723 wc->writeback_size--;
1725 } while (--c->n_entries);
1726 mempool_free(c, &wc->copy_pool);
1727 } while (!list_empty(list));
1730 static int writecache_endio_thread(void *data)
1732 struct dm_writecache *wc = data;
1735 struct list_head list;
1737 raw_spin_lock_irq(&wc->endio_list_lock);
1738 if (!list_empty(&wc->endio_list))
1740 set_current_state(TASK_INTERRUPTIBLE);
1741 raw_spin_unlock_irq(&wc->endio_list_lock);
1743 if (unlikely(kthread_should_stop())) {
1744 set_current_state(TASK_RUNNING);
1753 list = wc->endio_list;
1754 list.next->prev = list.prev->next = &list;
1755 INIT_LIST_HEAD(&wc->endio_list);
1756 raw_spin_unlock_irq(&wc->endio_list_lock);
1758 if (!WC_MODE_FUA(wc))
1759 writecache_disk_flush(wc, wc->dev);
1763 if (WC_MODE_PMEM(wc)) {
1764 __writecache_endio_pmem(wc, &list);
1766 __writecache_endio_ssd(wc, &list);
1767 writecache_wait_for_ios(wc, READ);
1770 writecache_commit_flushed(wc, false);
1778 static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e)
1780 struct dm_writecache *wc = wb->wc;
1781 unsigned block_size = wc->block_size;
1782 void *address = memory_data(wc, e);
1784 persistent_memory_flush_cache(address, block_size);
1786 if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
1789 return bio_add_page(&wb->bio, persistent_memory_page(address),
1790 block_size, persistent_memory_page_offset(address)) != 0;
1793 struct writeback_list {
1794 struct list_head list;
1798 static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
1800 if (unlikely(wc->max_writeback_jobs)) {
1801 if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
1803 while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
1804 writecache_wait_on_freelist(wc);
1811 static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
1813 struct wc_entry *e, *f;
1815 struct writeback_struct *wb;
1820 e = container_of(wbl->list.prev, struct wc_entry, lru);
1823 max_pages = e->wc_list_contiguous;
1825 bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE,
1826 GFP_NOIO, &wc->bio_set);
1827 wb = container_of(bio, struct writeback_struct, bio);
1829 bio->bi_end_io = writecache_writeback_endio;
1830 bio->bi_iter.bi_sector = read_original_sector(wc, e);
1831 if (max_pages <= WB_LIST_INLINE ||
1832 unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
1833 GFP_NOIO | __GFP_NORETRY |
1834 __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
1835 wb->wc_list = wb->wc_list_inline;
1836 max_pages = WB_LIST_INLINE;
1839 BUG_ON(!wc_add_block(wb, e));
1844 while (wbl->size && wb->wc_list_n < max_pages) {
1845 f = container_of(wbl->list.prev, struct wc_entry, lru);
1846 if (read_original_sector(wc, f) !=
1847 read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
1849 if (!wc_add_block(wb, f))
1853 wb->wc_list[wb->wc_list_n++] = f;
1856 if (WC_MODE_FUA(wc))
1857 bio->bi_opf |= REQ_FUA;
1858 if (writecache_has_error(wc)) {
1859 bio->bi_status = BLK_STS_IOERR;
1861 } else if (unlikely(!bio_sectors(bio))) {
1862 bio->bi_status = BLK_STS_OK;
1868 __writeback_throttle(wc, wbl);
1872 static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
1874 struct wc_entry *e, *f;
1875 struct dm_io_region from, to;
1876 struct copy_struct *c;
1882 e = container_of(wbl->list.prev, struct wc_entry, lru);
1885 n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);
1887 from.bdev = wc->ssd_dev->bdev;
1888 from.sector = cache_sector(wc, e);
1889 from.count = n_sectors;
1890 to.bdev = wc->dev->bdev;
1891 to.sector = read_original_sector(wc, e);
1892 to.count = n_sectors;
1894 c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
1897 c->n_entries = e->wc_list_contiguous;
1899 while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
1901 f = container_of(wbl->list.prev, struct wc_entry, lru);
1907 if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
1908 if (to.sector >= wc->data_device_sectors) {
1909 writecache_copy_endio(0, 0, c);
1912 from.count = to.count = wc->data_device_sectors - to.sector;
1915 dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
1917 __writeback_throttle(wc, wbl);
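/*
 * Background writeback worker.  In SSD mode it may first pause while the
 * origin device is busy (dm_iot_idle_time() vs. wc->pause), then it takes
 * committed entries from the cold end of the LRU, merges physically
 * contiguous blocks up to BIO_MAX_VECS, and writes them back either with
 * bios built from the pmem pages or with dm-kcopyd copies from the SSD.
 */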
1921 static void writecache_writeback(struct work_struct *work)
1923 struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
1924 struct blk_plug plug;
1925 struct wc_entry *f, *g, *e = NULL;
1926 struct rb_node *node, *next_node;
1927 struct list_head skipped;
1928 struct writeback_list wbl;
1929 unsigned long n_walked;
1931 if (!WC_MODE_PMEM(wc)) {
1932 /* Wait for any active kcopyd work on behalf of ssd writeback */
1933 dm_kcopyd_client_flush(wc->dm_kcopyd);
1936 if (likely(wc->pause != 0)) {
1939 if (unlikely(wc->cleaner) || unlikely(wc->writeback_all) ||
1940 unlikely(dm_suspended(wc->ti)))
1942 idle = dm_iot_idle_time(&wc->iot);
1943 if (idle >= wc->pause)
1945 idle = wc->pause - idle;
1948 schedule_timeout_idle(idle);
1954 if (writecache_has_error(wc)) {
1959 if (unlikely(wc->writeback_all)) {
1960 if (writecache_wait_for_writeback(wc))
1964 if (wc->overwrote_committed) {
1965 writecache_wait_for_ios(wc, WRITE);
1969 INIT_LIST_HEAD(&skipped);
1970 INIT_LIST_HEAD(&wbl.list);
1972 while (!list_empty(&wc->lru) &&
1973 (wc->writeback_all ||
1974 wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
1975 (jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
1976 wc->max_age - wc->max_age / MAX_AGE_DIV))) {
1979 if (unlikely(n_walked > WRITEBACK_LATENCY) &&
1980 likely(!wc->writeback_all)) {
1981 if (likely(!dm_suspended(wc->ti)))
1982 queue_work(wc->writeback_wq, &wc->writeback_work);
1986 if (unlikely(wc->writeback_all)) {
1988 writecache_flush(wc);
1989 e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
1993 e = container_of(wc->lru.prev, struct wc_entry, lru);
1994 BUG_ON(e->write_in_progress);
1995 if (unlikely(!writecache_entry_is_committed(wc, e))) {
1996 writecache_flush(wc);
1998 node = rb_prev(&e->rb_node);
2000 f = container_of(node, struct wc_entry, rb_node);
2001 if (unlikely(read_original_sector(wc, f) ==
2002 read_original_sector(wc, e))) {
2003 BUG_ON(!f->write_in_progress);
2004 list_move(&e->lru, &skipped);
2009 wc->writeback_size++;
2010 list_move(&e->lru, &wbl.list);
2012 e->write_in_progress = true;
2013 e->wc_list_contiguous = 1;
2018 next_node = rb_next(&f->rb_node);
2019 if (unlikely(!next_node))
2021 g = container_of(next_node, struct wc_entry, rb_node);
2022 if (unlikely(read_original_sector(wc, g) ==
2023 read_original_sector(wc, f))) {
2027 if (read_original_sector(wc, g) !=
2028 read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
2030 if (unlikely(g->write_in_progress))
2032 if (unlikely(!writecache_entry_is_committed(wc, g)))
2035 if (!WC_MODE_PMEM(wc)) {
2041 //if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
2044 wc->writeback_size++;
2045 list_move(&g->lru, &wbl.list);
2047 g->write_in_progress = true;
2048 g->wc_list_contiguous = BIO_MAX_VECS;
2050 e->wc_list_contiguous++;
2051 if (unlikely(e->wc_list_contiguous == BIO_MAX_VECS)) {
2052 if (unlikely(wc->writeback_all)) {
2053 next_node = rb_next(&f->rb_node);
2054 if (likely(next_node))
2055 g = container_of(next_node, struct wc_entry, rb_node);
2063 if (!list_empty(&skipped)) {
2064 list_splice_tail(&skipped, &wc->lru);
2066 * If we didn't make any progress, we must wait until some
2067 * writeback finishes to avoid burning CPU in a loop
2069 if (unlikely(!wbl.size))
2070 writecache_wait_for_writeback(wc);
2075 blk_start_plug(&plug);
2077 if (WC_MODE_PMEM(wc))
2078 __writecache_writeback_pmem(wc, &wbl);
2080 __writecache_writeback_ssd(wc, &wbl);
2082 blk_finish_plug(&plug);
2084 if (unlikely(wc->writeback_all)) {
2086 while (writecache_wait_for_writeback(wc));
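/*
 * Split the cache device into metadata and data.  Each cached block costs
 * block_size bytes of data plus one struct wc_memory_entry (two __le64
 * fields, i.e. 16 bytes) of metadata; the metadata area is rounded up to a
 * block boundary and the iteration stops once both fit inside device_size.
 */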
2091 static int calculate_memory_size(uint64_t device_size, unsigned block_size,
2092 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
2094 uint64_t n_blocks, offset;
2097 n_blocks = device_size;
2098 do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));
2103 /* Verify the following entries[n_blocks] won't overflow */
2104 if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
2105 sizeof(struct wc_memory_entry)))
2107 offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
2108 offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
2109 if (offset + n_blocks * block_size <= device_size)
2114 /* check if the bit field overflows */
2116 if (e.index != n_blocks)
2120 *n_blocks_p = n_blocks;
2121 if (n_metadata_blocks_p)
2122 *n_metadata_blocks_p = offset >> __ffs(block_size);
2126 static int init_memory(struct dm_writecache *wc)
2131 r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
2135 r = writecache_alloc_entries(wc);
2139 for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
2140 pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
2141 pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
2142 pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
2143 pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
2144 pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
2146 for (b = 0; b < wc->n_blocks; b++) {
2147 write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
2151 writecache_flush_all_metadata(wc);
2152 writecache_commit_flushed(wc, false);
2153 pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
2154 writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
2155 writecache_commit_flushed(wc, false);
2160 static void writecache_dtr(struct dm_target *ti)
2162 struct dm_writecache *wc = ti->private;
2167 if (wc->endio_thread)
2168 kthread_stop(wc->endio_thread);
2170 if (wc->flush_thread)
2171 kthread_stop(wc->flush_thread);
2173 bioset_exit(&wc->bio_set);
2175 mempool_exit(&wc->copy_pool);
2177 if (wc->writeback_wq)
2178 destroy_workqueue(wc->writeback_wq);
2181 dm_put_device(ti, wc->dev);
2184 dm_put_device(ti, wc->ssd_dev);
2188 if (wc->memory_map) {
2189 if (WC_MODE_PMEM(wc))
2190 persistent_memory_release(wc);
2192 vfree(wc->memory_map);
2196 dm_kcopyd_client_destroy(wc->dm_kcopyd);
2199 dm_io_client_destroy(wc->dm_io);
2201 vfree(wc->dirty_bitmap);
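/*
 * Constructor.  The table line is
 *   writecache <p|s> <origin dev> <cache dev> <block size> <#opt args> [args...]
 * with the optional arguments parsed below (start_sector, high_watermark,
 * low_watermark, writeback_jobs, autocommit_blocks, autocommit_time, max_age,
 * cleaner, fua/nofua, metadata_only, pause_writeback).
 *
 * A minimal usage sketch (device paths and sizes are illustrative only):
 *   dmsetup create wc --table \
 *     "0 $(blockdev --getsz /dev/vg/origin) writecache s /dev/vg/origin /dev/vg/cache 4096 0"
 */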
2206 static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2208 struct dm_writecache *wc;
2209 struct dm_arg_set as;
2211 unsigned opt_params;
2212 size_t offset, data_size;
2215 int high_wm_percent = HIGH_WATERMARK;
2216 int low_wm_percent = LOW_WATERMARK;
2218 struct wc_memory_superblock s;
2220 static struct dm_arg _args[] = {
2221 {0, 18, "Invalid number of feature args"},
2227 wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
2229 ti->error = "Cannot allocate writecache structure";
2236 mutex_init(&wc->lock);
2237 wc->max_age = MAX_AGE_UNSPECIFIED;
2238 writecache_poison_lists(wc);
2239 init_waitqueue_head(&wc->freelist_wait);
2240 timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
2241 timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);
2243 for (i = 0; i < 2; i++) {
2244 atomic_set(&wc->bio_in_progress[i], 0);
2245 init_waitqueue_head(&wc->bio_in_progress_wait[i]);
2248 wc->dm_io = dm_io_client_create();
2249 if (IS_ERR(wc->dm_io)) {
2250 r = PTR_ERR(wc->dm_io);
2251 ti->error = "Unable to allocate dm-io client";
2256 wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
2257 if (!wc->writeback_wq) {
2259 ti->error = "Could not allocate writeback workqueue";
2262 INIT_WORK(&wc->writeback_work, writecache_writeback);
2263 INIT_WORK(&wc->flush_work, writecache_flush_work);
2265 dm_iot_init(&wc->iot);
2267 raw_spin_lock_init(&wc->endio_list_lock);
2268 INIT_LIST_HEAD(&wc->endio_list);
2269 wc->endio_thread = kthread_run(writecache_endio_thread, wc, "writecache_endio");
2270 if (IS_ERR(wc->endio_thread)) {
2271 r = PTR_ERR(wc->endio_thread);
2272 wc->endio_thread = NULL;
2273 ti->error = "Couldn't spawn endio thread";
2278 * Parse the mode (pmem or ssd)
2280 string = dm_shift_arg(&as);
2284 if (!strcasecmp(string, "s")) {
2285 wc->pmem_mode = false;
2286 } else if (!strcasecmp(string, "p")) {
2287 #ifdef DM_WRITECACHE_HAS_PMEM
2288 wc->pmem_mode = true;
2289 wc->writeback_fua = true;
2292 * If the architecture doesn't support persistent memory or
2293 * the kernel doesn't support any DAX drivers, this driver can
2294 * only be used in SSD-only mode.
2297 ti->error = "Persistent memory or DAX not supported on this system";
2304 if (WC_MODE_PMEM(wc)) {
2305 r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
2306 offsetof(struct writeback_struct, bio),
2309 ti->error = "Could not allocate bio set";
2313 wc->pause = PAUSE_WRITEBACK;
2314 r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
2316 ti->error = "Could not allocate mempool";
2322 * Parse the origin data device
2324 string = dm_shift_arg(&as);
2327 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
2329 ti->error = "Origin data device lookup failed";
2334 * Parse cache data device (be it pmem or ssd)
2336 string = dm_shift_arg(&as);
2340 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
2342 ti->error = "Cache data device lookup failed";
2345 wc->memory_map_size = bdev_nr_bytes(wc->ssd_dev->bdev);
2348 * Parse the cache block size
2350 string = dm_shift_arg(&as);
2353 if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
2354 wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
2355 (wc->block_size & (wc->block_size - 1))) {
2357 ti->error = "Invalid block size";
2360 if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
2361 wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
2363 ti->error = "Block size is smaller than device logical block size";
2366 wc->block_size_bits = __ffs(wc->block_size);
2368 wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
2369 wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
2370 wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);
2373 * Parse optional arguments
2375 r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
2379 while (opt_params) {
2380 string = dm_shift_arg(&as), opt_params--;
2381 if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
2382 unsigned long long start_sector;
2383 string = dm_shift_arg(&as), opt_params--;
2384 if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
2385 goto invalid_optional;
2386 wc->start_sector = start_sector;
2387 wc->start_sector_set = true;
2388 if (wc->start_sector != start_sector ||
2389 wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
2390 goto invalid_optional;
2391 } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
2392 string = dm_shift_arg(&as), opt_params--;
2393 if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
2394 goto invalid_optional;
2395 if (high_wm_percent < 0 || high_wm_percent > 100)
2396 goto invalid_optional;
2397 wc->high_wm_percent_value = high_wm_percent;
2398 wc->high_wm_percent_set = true;
2399 } else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
2400 string = dm_shift_arg(&as), opt_params--;
2401 if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
2402 goto invalid_optional;
2403 if (low_wm_percent < 0 || low_wm_percent > 100)
2404 goto invalid_optional;
2405 wc->low_wm_percent_value = low_wm_percent;
2406 wc->low_wm_percent_set = true;
2407 } else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
2408 string = dm_shift_arg(&as), opt_params--;
2409 if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
2410 goto invalid_optional;
2411 wc->max_writeback_jobs_set = true;
2412 } else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
2413 string = dm_shift_arg(&as), opt_params--;
2414 if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
2415 goto invalid_optional;
2416 wc->autocommit_blocks_set = true;
2417 } else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
2418 unsigned autocommit_msecs;
2419 string = dm_shift_arg(&as), opt_params--;
2420 if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
2421 goto invalid_optional;
2422 if (autocommit_msecs > 3600000)
2423 goto invalid_optional;
2424 wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
2425 wc->autocommit_time_value = autocommit_msecs;
2426 wc->autocommit_time_set = true;
2427 } else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
2428 unsigned max_age_msecs;
2429 string = dm_shift_arg(&as), opt_params--;
2430 if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
2431 goto invalid_optional;
2432 if (max_age_msecs > 86400000)
2433 goto invalid_optional;
2434 wc->max_age = msecs_to_jiffies(max_age_msecs);
2435 wc->max_age_set = true;
2436 wc->max_age_value = max_age_msecs;
2437 } else if (!strcasecmp(string, "cleaner")) {
2438 wc->cleaner_set = true;
2440 } else if (!strcasecmp(string, "fua")) {
2441 if (WC_MODE_PMEM(wc)) {
2442 wc->writeback_fua = true;
2443 wc->writeback_fua_set = true;
2444 } else goto invalid_optional;
2445 } else if (!strcasecmp(string, "nofua")) {
2446 if (WC_MODE_PMEM(wc)) {
2447 wc->writeback_fua = false;
2448 wc->writeback_fua_set = true;
2449 } else goto invalid_optional;
2450 } else if (!strcasecmp(string, "metadata_only")) {
2451 wc->metadata_only = true;
2452 } else if (!strcasecmp(string, "pause_writeback") && opt_params >= 1) {
2453 unsigned pause_msecs;
2454 if (WC_MODE_PMEM(wc))
2455 goto invalid_optional;
2456 string = dm_shift_arg(&as), opt_params--;
2457 if (sscanf(string, "%u%c", &pause_msecs, &dummy) != 1)
2458 goto invalid_optional;
2459 if (pause_msecs > 60000)
2460 goto invalid_optional;
2461 wc->pause = msecs_to_jiffies(pause_msecs);
2462 wc->pause_set = true;
2463 wc->pause_value = pause_msecs;
2467 ti->error = "Invalid optional argument";
2472 if (high_wm_percent < low_wm_percent) {
2474 ti->error = "High watermark must be greater than or equal to low watermark";
2478 if (WC_MODE_PMEM(wc)) {
2479 if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
2481 ti->error = "Asynchronous persistent memory not supported as pmem cache";
2485 r = persistent_memory_claim(wc);
2487 ti->error = "Unable to map persistent memory for cache";
2491 size_t n_blocks, n_metadata_blocks;
2492 uint64_t n_bitmap_bits;
2494 wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
2496 bio_list_init(&wc->flush_list);
2497 wc->flush_thread = kthread_run(writecache_flush_thread, wc, "dm_writecache_flush");
2498 if (IS_ERR(wc->flush_thread)) {
2499 r = PTR_ERR(wc->flush_thread);
2500 wc->flush_thread = NULL;
2501 ti->error = "Couldn't spawn flush thread";
2505 r = calculate_memory_size(wc->memory_map_size, wc->block_size,
2506 &n_blocks, &n_metadata_blocks);
2508 ti->error = "Invalid device size";
2512 n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
2513 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
2514 /* this is a limitation of the test_bit functions */
2515 if (n_bitmap_bits > 1U << 31) {
2517 ti->error = "Invalid device size";
2521 wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
2522 if (!wc->memory_map) {
2524 ti->error = "Unable to allocate memory for metadata";
2528 wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2529 if (IS_ERR(wc->dm_kcopyd)) {
2530 r = PTR_ERR(wc->dm_kcopyd);
2531 ti->error = "Unable to allocate dm-kcopyd client";
2532 wc->dm_kcopyd = NULL;
2536 wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
2537 wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
2538 BITS_PER_LONG * sizeof(unsigned long);
2539 wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
2540 if (!wc->dirty_bitmap) {
2542 ti->error = "Unable to allocate dirty bitmap";
2546 r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
2548 ti->error = "Unable to read first block of metadata";
2553 r = copy_mc_to_kernel(&s, sb(wc), sizeof(struct wc_memory_superblock));
2555 ti->error = "Hardware memory error when reading superblock";
2558 if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
2559 r = init_memory(wc);
2561 ti->error = "Unable to initialize device";
2564 r = copy_mc_to_kernel(&s, sb(wc),
2565 sizeof(struct wc_memory_superblock));
2567 ti->error = "Hardware memory error when reading superblock";
2572 if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
2573 ti->error = "Invalid magic in the superblock";
2578 if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
2579 ti->error = "Invalid version in the superblock";
2584 if (le32_to_cpu(s.block_size) != wc->block_size) {
2585 ti->error = "Block size does not match superblock";
2590 wc->n_blocks = le64_to_cpu(s.n_blocks);
2592 offset = wc->n_blocks * sizeof(struct wc_memory_entry);
2593 if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
2595 ti->error = "Overflow in size calculation";
2599 offset += sizeof(struct wc_memory_superblock);
2600 if (offset < sizeof(struct wc_memory_superblock))
2602 offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
2603 data_size = wc->n_blocks * (size_t)wc->block_size;
2604 if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
2605 (offset + data_size < offset))
2607 if (offset + data_size > wc->memory_map_size) {
2608 ti->error = "Memory area is too small";
2613 wc->metadata_sectors = offset >> SECTOR_SHIFT;
2614 wc->block_start = (char *)sb(wc) + offset;
2616 x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
2619 wc->freelist_high_watermark = x;
2620 x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
2623 wc->freelist_low_watermark = x;
2626 activate_cleaner(wc);
2628 r = writecache_alloc_entries(wc);
2630 ti->error = "Cannot allocate memory";
2634 ti->num_flush_bios = WC_MODE_PMEM(wc) ? 1 : 2;
2635 ti->flush_supported = true;
2636 ti->num_discard_bios = 1;
2638 if (WC_MODE_PMEM(wc))
2639 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
2645 ti->error = "Bad arguments";
2651 static void writecache_status(struct dm_target *ti, status_type_t type,
2652 unsigned status_flags, char *result, unsigned maxlen)
2654 struct dm_writecache *wc = ti->private;
2655 unsigned extra_args;
2659 case STATUSTYPE_INFO:
2660 DMEMIT("%ld %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
2661 writecache_has_error(wc),
2662 (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
2663 (unsigned long long)wc->writeback_size,
2665 wc->stats.read_hits,
2667 wc->stats.write_hits_uncommitted,
2668 wc->stats.write_hits_committed,
2669 wc->stats.writes_around,
2670 wc->stats.writes_allocate,
2671 wc->stats.writes_blocked_on_freelist,
2673 wc->stats.discards);
2675 case STATUSTYPE_TABLE:
2676 DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
2677 wc->dev->name, wc->ssd_dev->name, wc->block_size);
2679 if (wc->start_sector_set)
2681 if (wc->high_wm_percent_set)
2683 if (wc->low_wm_percent_set)
2685 if (wc->max_writeback_jobs_set)
2687 if (wc->autocommit_blocks_set)
2689 if (wc->autocommit_time_set)
2691 if (wc->max_age_set)
2693 if (wc->cleaner_set)
2695 if (wc->writeback_fua_set)
2697 if (wc->metadata_only)
2702 DMEMIT("%u", extra_args);
2703 if (wc->start_sector_set)
2704 DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
2705 if (wc->high_wm_percent_set)
2706 DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
2707 if (wc->low_wm_percent_set)
2708 DMEMIT(" low_watermark %u", wc->low_wm_percent_value);
2709 if (wc->max_writeback_jobs_set)
2710 DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
2711 if (wc->autocommit_blocks_set)
2712 DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
2713 if (wc->autocommit_time_set)
2714 DMEMIT(" autocommit_time %u", wc->autocommit_time_value);
2715 if (wc->max_age_set)
2716 DMEMIT(" max_age %u", wc->max_age_value);
2717 if (wc->cleaner_set)
2719 if (wc->writeback_fua_set)
2720 DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
2721 if (wc->metadata_only)
2722 DMEMIT(" metadata_only");
2724 DMEMIT(" pause_writeback %u", wc->pause_value);
2726 case STATUSTYPE_IMA:
2732 static struct target_type writecache_target = {
2733 .name = "writecache",
2734 .version = {1, 6, 0},
2735 .module = THIS_MODULE,
2736 .ctr = writecache_ctr,
2737 .dtr = writecache_dtr,
2738 .status = writecache_status,
2739 .postsuspend = writecache_suspend,
2740 .resume = writecache_resume,
2741 .message = writecache_message,
2742 .map = writecache_map,
2743 .end_io = writecache_end_io,
2744 .iterate_devices = writecache_iterate_devices,
2745 .io_hints = writecache_io_hints,
2748 static int __init dm_writecache_init(void)
2752 r = dm_register_target(&writecache_target);
2754 DMERR("register failed %d", r);
2761 static void __exit dm_writecache_exit(void)
2763 dm_unregister_target(&writecache_target);
2766 module_init(dm_writecache_init);
2767 module_exit(dm_writecache_exit);
2769 MODULE_DESCRIPTION(DM_NAME " writecache target");
2770 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2771 MODULE_LICENSE("GPL");