dm writecache: add unlikely for getting two blocks with the same LBA
[linux-2.6-block.git] / drivers / md / dm-writecache.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK 50
#define LOW_WATERMARK 45
#define MAX_WRITEBACK_JOBS 0
#define ENDIO_LATENCY 16
#define WRITEBACK_LATENCY 64
#define AUTOCOMMIT_BLOCKS_SSD 65536
#define AUTOCOMMIT_BLOCKS_PMEM 64
#define AUTOCOMMIT_MSEC 1000

#define BITMAP_GRANULARITY 65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src) ((dest) = (src))
#endif
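
/*
 * pmem_assign() stores a value into persistent memory through a local
 * temporary so that memcpy_flushcache() can both copy the bytes and flush
 * them out of the CPU cache in one step; a plain assignment would leave
 * the new value sitting in a dirty cache line, where a power failure
 * could lose it even though the store "completed".
 */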

#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC 0x23489321
#define MEMORY_SUPERBLOCK_VERSION 1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[0];
};
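
/*
 * On-media layout: struct wc_memory_superblock sits at the start of the
 * cache device (the union pads the header to 8 * __le64 = 64 bytes),
 * immediately followed by one 16-byte wc_memory_entry per cache block.
 * Data blocks begin after this metadata region, rounded up to a block
 * boundary (see calculate_memory_size() below).
 */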

struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc) ((wc)->pmem_mode)
#define WC_MODE_FUA(wc) ((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc) false
#define WC_MODE_FUA(wc) false
#endif
#define WC_MODE_SORT_FREELIST(wc) (!WC_MODE_PMEM(wc))
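
/*
 * WC_MODE_SORT_FREELIST: in SSD mode the free entries are kept in an
 * rb-tree ordered by address rather than in a plain list, so allocations
 * tend to hand out adjacent cache blocks and writeback can later be
 * issued as larger sequential I/O to the cache device.  With persistent
 * memory there is no seek penalty, so a simple list is enough.
 */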

struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE 16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					    "A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;

	wc->memory_vmapped = false;

	if (!wc->ssd_dev->dax_dev) {
		r = -EOPNOTSUPP;
		goto err1;
	}
	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	BUG();
}
#endif
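
/*
 * persistent_memory_claim() maps the whole DAX-capable cache device into
 * the kernel address space.  The fast path gets one physically contiguous
 * mapping straight from dax_direct_access(); if the device cannot hand
 * back the full range in one call (da != p), the pages are collected
 * range by range and stitched together with vmap() instead, and
 * memory_vmapped is set so the flush helpers below know to use the
 * vmap-aware cache maintenance calls.
 */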

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

#define writecache_error(wc, err, msg, arg...)			\
do {								\
	if (!cmpxchg(&(wc)->error, 0, err))			\
		DMERR(msg, ##arg);				\
	wake_up(&(wc)->freelist_wait);				\
} while (0)

#define writecache_has_error(wc) (unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void ssd_commit_flushed(struct dm_writecache *wc)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void writecache_commit_flushed(struct dm_writecache *wc)
{
	if (WC_MODE_PMEM(wc))
		wmb();
	else
		ssd_commit_flushed(wc);
}
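
/*
 * Committing metadata has two very different costs.  In pmem mode the
 * entries were already written in place with memcpy_flushcache(), so a
 * single wmb() is enough to order them.  In SSD mode the in-memory copy
 * of the metadata is only a shadow: ssd_commit_flushed() walks the dirty
 * bitmap, writes each run of dirty BITMAP_GRANULARITY-sized chunks to the
 * cache device with dm-io, waits for all of them to complete, and then
 * issues a disk flush (REQ_PREFLUSH) before clearing the bitmap.
 */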

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}
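
/*
 * writecache_find_entry() looks up a cache entry by original LBA in the
 * rb-tree.  With WFE_RETURN_FOLLOWING, a miss returns the entry with the
 * next higher LBA instead of NULL (used by reads and discards to find the
 * start of a range).  The tree may briefly hold two entries for the same
 * LBA - an old committed block and a newer uncommitted one - which is why
 * the second loop keeps walking neighbours with the same LBA and returns
 * either the lowest-seq_count copy (WFE_LOWEST_SEQ) or the newest one.
 * That duplicate case is rare, hence the unlikely() annotations.
 */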

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		list_del(&e->lru);
	}
	wc->freelist_size--;
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);

	return e;
}
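
/*
 * Freelist accounting drives writeback: popping an entry that brings
 * free + in-flight-writeback down to freelist_high_watermark queues the
 * writeback worker, and the worker (writecache_writeback() below) keeps
 * draining the LRU while that sum stays at or below
 * freelist_low_watermark, i.e. until enough headroom is rebuilt.
 */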

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc);

	writecache_wait_for_ios(wc, WRITE);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
	writecache_commit_flushed(wc);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc);
}
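
/*
 * The commit protocol in writecache_flush() is a two-phase sequence:
 * first every uncommitted entry (data plus metadata) is flushed and
 * committed, and only then is the superblock seq_count incremented and
 * committed.  An entry counts as committed when its seq_count is below
 * the superblock's (writecache_entry_is_committed()), so a crash between
 * the two phases simply leaves the new entries uncommitted.  The final
 * loop frees older committed copies that the just-committed entries
 * superseded - the "two blocks with the same LBA" case resolved here and
 * in writecache_find_entry().
 */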

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				writecache_wait_for_ios(wc, READ);
				writecache_wait_for_ios(wc, WRITE);
				discarded_something = true;
			}
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);

	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
	}

	return 0;
}

static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	if (WC_MODE_PMEM(wc))
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc);
	}

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}

static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned long flags;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_irq(&bv, &flags);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = memcpy_mcsafe(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache(data, buf, size);
		}

		bvec_kunmap_irq(buf, &flags);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}
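
/*
 * bio_copy_block() moves one cache block between a bio and persistent
 * memory, one bio_vec segment at a time.  Reads go through
 * memcpy_mcsafe(), which (on architectures that support it) returns an
 * error instead of triggering a machine check when the pmem has an
 * uncorrectable error; the bio is then failed with BLK_STS_IOERR rather
 * than taking the whole machine down.  Writes use memcpy_flushcache() so
 * the data is durable without a separate flush pass.
 */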

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			generic_make_request(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}

static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			goto unlock_submit;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e))
					goto bio_copy;
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					goto bio_copy;
				}
			}
			e = writecache_pop_from_freelist(wc);
			if (unlikely(!e)) {
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}
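
/*
 * writecache_map() exits through one of five labels: unlock_remap_origin
 * (pass the bio through to the origin device), unlock_remap (redirect it
 * to the SSD cache and count it in bio_in_progress), unlock_submit
 * (serviced directly from pmem, completed here), unlock_return (handed
 * off to the flush thread, which will complete it) and unlock_error.
 * Writes always land in the cache: an existing uncommitted entry is
 * overwritten in place, otherwise a free entry is allocated, tagged with
 * the current seq_count and inserted - this is the moment a second entry
 * with the same LBA as an older committed block can appear in the tree.
 */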

static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private != NULL) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}


static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}

static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					"write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc);

		wc_unlock(wc);
	}

	return 0;
}

static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);
	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}

static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}

static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}

static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *e, *f, *g;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark)) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
			queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_del(&e->lru);
				list_add(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_del(&e->lru);
		list_add(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
			    read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_del(&g->lru);
			list_add(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_PAGES;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES))
				break;
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't make any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}
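
/*
 * The writeback scan pulls entries off the cold end of the LRU and then
 * extends each one into a run of LBA-contiguous committed neighbours via
 * rb_next(), recording the run length in e->wc_list_contiguous (capped at
 * BIO_MAX_PAGES).  Duplicate-LBA neighbours are stepped over here as
 * well - the unlikely() on that comparison is what the commit shown at
 * the top of this page adds.  In pmem mode the run becomes one multi-page
 * bio; in SSD mode it must also be physically contiguous in the cache
 * (g == f + 1) so it can be handed to dm-kcopyd as a single copy.
 */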

static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify the following entries[n_blocks] won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check if the bit field overflows */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}
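
/*
 * Worked example (illustrative numbers): device_size = 1 GiB and
 * block_size = 4096.  Each cache block costs 4096 data bytes plus a
 * 16-byte wc_memory_entry, so the first guess is
 * 1073741824 / 4112 = 261123 blocks.  The metadata then needs
 * 64 + 261123 * 16 = 4178032 bytes, rounded up to a block boundary:
 * offset = 4182016.  Since 4182016 + 261123 * 4096 = 1073741824 fits
 * exactly, the loop accepts the first guess; otherwise it would shave
 * off blocks one at a time until metadata plus data fit.
 */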

static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++)
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc);

	return 0;
}
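
/*
 * Note the ordering in init_memory(): every superblock field and entry is
 * written and committed *before* the magic number, which is written and
 * committed last.  Only a superblock with valid magic is ever trusted, so
 * a crash during initialization cannot leave half-written metadata that
 * later passes validation.
 */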
1761
1762static void writecache_dtr(struct dm_target *ti)
1763{
1764 struct dm_writecache *wc = ti->private;
1765
1766 if (!wc)
1767 return;
1768
1769 if (wc->endio_thread)
1770 kthread_stop(wc->endio_thread);
1771
1772 if (wc->flush_thread)
1773 kthread_stop(wc->flush_thread);
1774
1775 bioset_exit(&wc->bio_set);
1776
1777 mempool_exit(&wc->copy_pool);
1778
1779 if (wc->writeback_wq)
1780 destroy_workqueue(wc->writeback_wq);
1781
1782 if (wc->dev)
1783 dm_put_device(ti, wc->dev);
1784
1785 if (wc->ssd_dev)
1786 dm_put_device(ti, wc->ssd_dev);
1787
1788 if (wc->entries)
1789 vfree(wc->entries);
1790
1791 if (wc->memory_map) {
1792 if (WC_MODE_PMEM(wc))
1793 persistent_memory_release(wc);
1794 else
1795 vfree(wc->memory_map);
1796 }
1797
1798 if (wc->dm_kcopyd)
1799 dm_kcopyd_client_destroy(wc->dm_kcopyd);
1800
1801 if (wc->dm_io)
1802 dm_io_client_destroy(wc->dm_io);
1803
1804 if (wc->dirty_bitmap)
1805 vfree(wc->dirty_bitmap);
1806
1807 kfree(wc);
1808}
1809
1810static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
1811{
1812 struct dm_writecache *wc;
1813 struct dm_arg_set as;
1814 const char *string;
1815 unsigned opt_params;
1816 size_t offset, data_size;
1817 int i, r;
1818 char dummy;
1819 int high_wm_percent = HIGH_WATERMARK;
1820 int low_wm_percent = LOW_WATERMARK;
1821 uint64_t x;
1822 struct wc_memory_superblock s;
1823
1824 static struct dm_arg _args[] = {
1825 {0, 10, "Invalid number of feature args"},
1826 };
1827
1828 as.argc = argc;
1829 as.argv = argv;
1830
1831 wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
1832 if (!wc) {
1833 ti->error = "Cannot allocate writecache structure";
1834 r = -ENOMEM;
1835 goto bad;
1836 }
1837 ti->private = wc;
1838 wc->ti = ti;
1839
1840 mutex_init(&wc->lock);
1841 writecache_poison_lists(wc);
1842 init_waitqueue_head(&wc->freelist_wait);
1843 timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
1844
1845 for (i = 0; i < 2; i++) {
1846 atomic_set(&wc->bio_in_progress[i], 0);
1847 init_waitqueue_head(&wc->bio_in_progress_wait[i]);
1848 }
1849
1850 wc->dm_io = dm_io_client_create();
1851 if (IS_ERR(wc->dm_io)) {
1852 r = PTR_ERR(wc->dm_io);
1853 ti->error = "Unable to allocate dm-io client";
1854 wc->dm_io = NULL;
1855 goto bad;
1856 }
1857
f87e033b 1858 wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
48debafe
MP
1859 if (!wc->writeback_wq) {
1860 r = -ENOMEM;
1861 ti->error = "Could not allocate writeback workqueue";
1862 goto bad;
1863 }
1864 INIT_WORK(&wc->writeback_work, writecache_writeback);
1865 INIT_WORK(&wc->flush_work, writecache_flush_work);
1866
1867 raw_spin_lock_init(&wc->endio_list_lock);
1868 INIT_LIST_HEAD(&wc->endio_list);
1869 wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
1870 if (IS_ERR(wc->endio_thread)) {
1871 r = PTR_ERR(wc->endio_thread);
1872 wc->endio_thread = NULL;
1873 ti->error = "Couldn't spawn endio thread";
1874 goto bad;
1875 }
1876 wake_up_process(wc->endio_thread);
1877
1878 /*
1879 * Parse the mode (pmem or ssd)
1880 */
1881 string = dm_shift_arg(&as);
1882 if (!string)
1883 goto bad_arguments;
1884
1885 if (!strcasecmp(string, "s")) {
1886 wc->pmem_mode = false;
1887 } else if (!strcasecmp(string, "p")) {
1888#ifdef DM_WRITECACHE_HAS_PMEM
1889 wc->pmem_mode = true;
1890 wc->writeback_fua = true;
1891#else
1892 /*
1893 * If the architecture doesn't support persistent memory or
1894 * the kernel doesn't support any DAX drivers, this driver can
1895 * only be used in SSD-only mode.
1896 */
1897 r = -EOPNOTSUPP;
1898 ti->error = "Persistent memory or DAX not supported on this system";
1899 goto bad;
1900#endif
1901 } else {
1902 goto bad_arguments;
1903 }
1904
1905 if (WC_MODE_PMEM(wc)) {
1906 r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
1907 offsetof(struct writeback_struct, bio),
1908 BIOSET_NEED_BVECS);
1909 if (r) {
1910 ti->error = "Could not allocate bio set";
1911 goto bad;
1912 }
1913 } else {
1914 r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
1915 if (r) {
1916 ti->error = "Could not allocate mempool";
1917 goto bad;
1918 }
1919 }
1920
1921 /*
1922 * Parse the origin data device
1923 */
1924 string = dm_shift_arg(&as);
1925 if (!string)
1926 goto bad_arguments;
1927 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
1928 if (r) {
1929 ti->error = "Origin data device lookup failed";
1930 goto bad;
1931 }
1932
1933 /*
1934 * Parse cache data device (be it pmem or ssd)
1935 */
1936 string = dm_shift_arg(&as);
1937 if (!string)
1938 goto bad_arguments;
1939
1940 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
1941 if (r) {
1942 ti->error = "Cache data device lookup failed";
1943 goto bad;
1944 }
1945 wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
1946
48debafe
MP
1947 /*
1948 * Parse the cache block size
1949 */
1950 string = dm_shift_arg(&as);
1951 if (!string)
1952 goto bad_arguments;
1953 if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
1954 wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
1955 (wc->block_size & (wc->block_size - 1))) {
1956 r = -EINVAL;
1957 ti->error = "Invalid block size";
1958 goto bad;
1959 }
1960 wc->block_size_bits = __ffs(wc->block_size);
1961
1962 wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
1963 wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
1964 wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);
1965
1966 /*
1967 * Parse optional arguments
1968 */
1969 r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
1970 if (r)
1971 goto bad;
1972
1973 while (opt_params) {
1974 string = dm_shift_arg(&as), opt_params--;
d284f824
MP
1975 if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
1976 unsigned long long start_sector;
1977 string = dm_shift_arg(&as), opt_params--;
1978 if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
1979 goto invalid_optional;
1980 wc->start_sector = start_sector;
1981 if (wc->start_sector != start_sector ||
1982 wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
1983 goto invalid_optional;
1984 } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
48debafe
MP
1985 string = dm_shift_arg(&as), opt_params--;
1986 if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
1987 goto invalid_optional;
1988 if (high_wm_percent < 0 || high_wm_percent > 100)
1989 goto invalid_optional;
1990 wc->high_wm_percent_set = true;
1991 } else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
1992 string = dm_shift_arg(&as), opt_params--;
1993 if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
1994 goto invalid_optional;
1995 if (low_wm_percent < 0 || low_wm_percent > 100)
1996 goto invalid_optional;
1997 wc->low_wm_percent_set = true;
1998 } else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
1999 string = dm_shift_arg(&as), opt_params--;
2000 if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
2001 goto invalid_optional;
2002 wc->max_writeback_jobs_set = true;
2003 } else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
2004 string = dm_shift_arg(&as), opt_params--;
2005 if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
2006 goto invalid_optional;
2007 wc->autocommit_blocks_set = true;
2008 } else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
2009 unsigned autocommit_msecs;
2010 string = dm_shift_arg(&as), opt_params--;
2011 if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
2012 goto invalid_optional;
2013 if (autocommit_msecs > 3600000)
2014 goto invalid_optional;
2015 wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
2016 wc->autocommit_time_set = true;
2017 } else if (!strcasecmp(string, "fua")) {
2018 if (WC_MODE_PMEM(wc)) {
2019 wc->writeback_fua = true;
2020 wc->writeback_fua_set = true;
2021 } else goto invalid_optional;
2022 } else if (!strcasecmp(string, "nofua")) {
2023 if (WC_MODE_PMEM(wc)) {
2024 wc->writeback_fua = false;
2025 wc->writeback_fua_set = true;
2026 } else goto invalid_optional;
2027 } else {
2028invalid_optional:
2029 r = -EINVAL;
2030 ti->error = "Invalid optional argument";
2031 goto bad;
2032 }
2033 }
2034
2035 if (high_wm_percent < low_wm_percent) {
2036 r = -EINVAL;
2037 ti->error = "High watermark must be greater than or equal to low watermark";
2038 goto bad;
2039 }
2040
d284f824
MP
2041 if (WC_MODE_PMEM(wc)) {
2042 r = persistent_memory_claim(wc);
2043 if (r) {
2044 ti->error = "Unable to map persistent memory for cache";
2045 goto bad;
2046 }
2047 } else {
48debafe
MP
2048 struct dm_io_region region;
2049 struct dm_io_request req;
2050 size_t n_blocks, n_metadata_blocks;
2051 uint64_t n_bitmap_bits;
2052
2053 wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
2054
2055 bio_list_init(&wc->flush_list);
2056 wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
2057 if (IS_ERR(wc->flush_thread)) {
2058 r = PTR_ERR(wc->flush_thread);
2059 wc->flush_thread = NULL;
 2060 ti->error = "Couldn't spawn flush thread";
2061 goto bad;
2062 }
2063 wake_up_process(wc->flush_thread);
2064
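 /*
  * Split the cache device into data blocks and the metadata area
  * needed to describe them.
  */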
2065 r = calculate_memory_size(wc->memory_map_size, wc->block_size,
2066 &n_blocks, &n_metadata_blocks);
2067 if (r) {
2068 ti->error = "Invalid device size";
2069 goto bad;
2070 }
2071
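 /*
  * One bit in the dirty bitmap covers BITMAP_GRANULARITY bytes of
  * metadata; round the bit count up.
  */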
2072 n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
2073 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
 2074 /* this is a limitation of the test_bit functions */
2075 if (n_bitmap_bits > 1U << 31) {
2076 r = -EFBIG;
2077 ti->error = "Invalid device size";
2078 goto bad;
2079 }
2080
2081 wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
2082 if (!wc->memory_map) {
2083 r = -ENOMEM;
2084 ti->error = "Unable to allocate memory for metadata";
2085 goto bad;
2086 }
2087
2088 wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2089 if (IS_ERR(wc->dm_kcopyd)) {
2090 r = PTR_ERR(wc->dm_kcopyd);
2091 ti->error = "Unable to allocate dm-kcopyd client";
2092 wc->dm_kcopyd = NULL;
2093 goto bad;
2094 }
2095
2096 wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
2097 wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
2098 BITS_PER_LONG * sizeof(unsigned long);
2099 wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
2100 if (!wc->dirty_bitmap) {
2101 r = -ENOMEM;
2102 ti->error = "Unable to allocate dirty bitmap";
2103 goto bad;
2104 }
2105
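 /*
  * Read the on-disk metadata into the vmalloc()ed shadow copy with a
  * single synchronous dm-io request.
  */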
2106 region.bdev = wc->ssd_dev->bdev;
 2107 region.sector = wc->start_sector;
2108 region.count = wc->metadata_sectors;
2109 req.bi_op = REQ_OP_READ;
2110 req.bi_op_flags = REQ_SYNC;
2111 req.mem.type = DM_IO_VMA;
2112 req.mem.ptr.vma = (char *)wc->memory_map;
2113 req.client = wc->dm_io;
2114 req.notify.fn = NULL;
2115
2116 r = dm_io(&req, 1, &region, NULL);
2117 if (r) {
2118 ti->error = "Unable to read metadata";
2119 goto bad;
2120 }
2121 }
2122
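 /*
  * Read the superblock with a machine-check-safe copy.  A superblock
  * whose magic and version are both zero is treated as an
  * uninitialized device: format it with init_memory() and re-read.
  */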
2123 r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
2124 if (r) {
2125 ti->error = "Hardware memory error when reading superblock";
2126 goto bad;
2127 }
2128 if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
2129 r = init_memory(wc);
2130 if (r) {
2131 ti->error = "Unable to initialize device";
2132 goto bad;
2133 }
2134 r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
2135 if (r) {
2136 ti->error = "Hardware memory error when reading superblock";
2137 goto bad;
2138 }
2139 }
2140
2141 if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
2142 ti->error = "Invalid magic in the superblock";
2143 r = -EINVAL;
2144 goto bad;
2145 }
2146
2147 if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
2148 ti->error = "Invalid version in the superblock";
2149 r = -EINVAL;
2150 goto bad;
2151 }
2152
2153 if (le32_to_cpu(s.block_size) != wc->block_size) {
2154 ti->error = "Block size does not match superblock";
2155 r = -EINVAL;
2156 goto bad;
2157 }
2158
2159 wc->n_blocks = le64_to_cpu(s.n_blocks);
2160
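 /*
  * Compute the byte offset of the data area (superblock plus one
  * wc_memory_entry per block, rounded up to the block size), checking
  * every step of the arithmetic for overflow.
  */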
2161 offset = wc->n_blocks * sizeof(struct wc_memory_entry);
2162 if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
2163overflow:
2164 ti->error = "Overflow in size calculation";
2165 r = -EINVAL;
2166 goto bad;
2167 }
2168 offset += sizeof(struct wc_memory_superblock);
2169 if (offset < sizeof(struct wc_memory_superblock))
2170 goto overflow;
2171 offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
2172 data_size = wc->n_blocks * (size_t)wc->block_size;
2173 if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
2174 (offset + data_size < offset))
2175 goto overflow;
2176 if (offset + data_size > wc->memory_map_size) {
2177 ti->error = "Memory area is too small";
2178 r = -EINVAL;
2179 goto bad;
2180 }
2181
2182 wc->metadata_sectors = offset >> SECTOR_SHIFT;
2183 wc->block_start = (char *)sb(wc) + offset;
2184
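 /*
  * Convert the watermark percentages (of cache occupancy) into
  * absolute freelist sizes; the "+ 50" rounds the division to the
  * nearest block.
  */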
2185 x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
2186 x += 50;
2187 do_div(x, 100);
2188 wc->freelist_high_watermark = x;
2189 x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
2190 x += 50;
2191 do_div(x, 100);
2192 wc->freelist_low_watermark = x;
2193
2194 r = writecache_alloc_entries(wc);
2195 if (r) {
2196 ti->error = "Cannot allocate memory";
2197 goto bad;
2198 }
2199
2200 ti->num_flush_bios = 1;
2201 ti->flush_supported = true;
2202 ti->num_discard_bios = 1;
2203
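 /* in persistent-memory mode, flush the CPU cache over the mapped region */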
2204 if (WC_MODE_PMEM(wc))
2205 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
2206
2207 return 0;
2208
2209bad_arguments:
2210 r = -EINVAL;
2211 ti->error = "Bad arguments";
2212bad:
2213 writecache_dtr(ti);
2214 return r;
2215}
2216
2217static void writecache_status(struct dm_target *ti, status_type_t type,
2218 unsigned status_flags, char *result, unsigned maxlen)
2219{
2220 struct dm_writecache *wc = ti->private;
2221 unsigned extra_args;
2222 unsigned sz = 0;
2223 uint64_t x;
2224
2225 switch (type) {
2226 case STATUSTYPE_INFO:
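 /* error flag, total blocks, free blocks, blocks under writeback */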
2227 DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
2228 (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
2229 (unsigned long long)wc->writeback_size);
2230 break;
2231 case STATUSTYPE_TABLE:
2232 DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
2233 wc->dev->name, wc->ssd_dev->name, wc->block_size);
2234 extra_args = 0;
2235 if (wc->start_sector)
2236 extra_args += 2;
2237 if (wc->high_wm_percent_set)
2238 extra_args += 2;
2239 if (wc->low_wm_percent_set)
2240 extra_args += 2;
2241 if (wc->max_writeback_jobs_set)
2242 extra_args += 2;
2243 if (wc->autocommit_blocks_set)
2244 extra_args += 2;
2245 if (wc->autocommit_time_set)
2246 extra_args += 2;
2247 if (wc->writeback_fua_set)
2248 extra_args++;
2249
2250 DMEMIT("%u", extra_args);
2251 if (wc->start_sector)
2252 DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
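 /*
  * Recover the percentages from the stored freelist watermarks: the
  * inverse of the calculation done in the constructor, again rounded
  * to the nearest percent.
  */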
2253 if (wc->high_wm_percent_set) {
2254 x = (uint64_t)wc->freelist_high_watermark * 100;
2255 x += wc->n_blocks / 2;
2256 do_div(x, (size_t)wc->n_blocks);
2257 DMEMIT(" high_watermark %u", 100 - (unsigned)x);
2258 }
2259 if (wc->low_wm_percent_set) {
2260 x = (uint64_t)wc->freelist_low_watermark * 100;
2261 x += wc->n_blocks / 2;
2262 do_div(x, (size_t)wc->n_blocks);
2263 DMEMIT(" low_watermark %u", 100 - (unsigned)x);
2264 }
2265 if (wc->max_writeback_jobs_set)
2266 DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
2267 if (wc->autocommit_blocks_set)
2268 DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
2269 if (wc->autocommit_time_set)
2270 DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
2271 if (wc->writeback_fua_set)
2272 DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
2273 break;
2274 }
2275}
2276
2277static struct target_type writecache_target = {
2278 .name = "writecache",
 2279 .version = {1, 1, 1},
2280 .module = THIS_MODULE,
2281 .ctr = writecache_ctr,
2282 .dtr = writecache_dtr,
2283 .status = writecache_status,
2284 .postsuspend = writecache_suspend,
2285 .resume = writecache_resume,
2286 .message = writecache_message,
2287 .map = writecache_map,
2288 .end_io = writecache_end_io,
2289 .iterate_devices = writecache_iterate_devices,
2290 .io_hints = writecache_io_hints,
2291};
2292
2293static int __init dm_writecache_init(void)
2294{
2295 int r;
2296
2297 r = dm_register_target(&writecache_target);
2298 if (r < 0) {
2299 DMERR("register failed %d", r);
2300 return r;
2301 }
2302
2303 return 0;
2304}
2305
2306static void __exit dm_writecache_exit(void)
2307{
2308 dm_unregister_target(&writecache_target);
2309}
2310
2311module_init(dm_writecache_init);
2312module_exit(dm_writecache_exit);
2313
2314MODULE_DESCRIPTION(DM_NAME " writecache target");
2315MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2316MODULE_LICENSE("GPL");