// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK 50
#define LOW_WATERMARK 45
#define MAX_WRITEBACK_JOBS 0
#define ENDIO_LATENCY 16
#define WRITEBACK_LATENCY 64
#define AUTOCOMMIT_BLOCKS_SSD 65536
#define AUTOCOMMIT_BLOCKS_PMEM 64
#define AUTOCOMMIT_MSEC 1000

#define BITMAP_GRANULARITY 65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

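/*
 * On pmem, assignments to persistent metadata must go through
 * memcpy_flushcache() so the new value is durable once a subsequent
 * commit (wmb) completes; evaluating the source into a local variable
 * first keeps the flushed store a single aligned copy. Without pmem
 * support this degenerates to a plain assignment.
 */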
#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src) \
do { \
	typeof(dest) uniq = (src); \
	memcpy_flushcache(&(dest), &uniq, sizeof(dest)); \
} while (0)
#else
#define pmem_assign(dest, src) ((dest) = (src))
#endif

#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC 0x23489321
#define MEMORY_SUPERBLOCK_VERSION 1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[0];
};

struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc) ((wc)->pmem_mode)
#define WC_MODE_FUA(wc) ((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc) false
#define WC_MODE_FUA(wc) false
#endif
#define WC_MODE_SORT_FREELIST(wc) (!WC_MODE_PMEM(wc))

struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE 16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					"A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

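/*
 * Map the cache device's persistent memory into the kernel address space.
 * The fast path gets one linear mapping straight from dax_direct_access();
 * if the device cannot hand back the whole range contiguously, the pages
 * are collected piecewise and stitched together with vmap() instead.
 */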
#ifdef DM_WRITECACHE_HAS_PMEM
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;

	wc->memory_vmapped = false;

	if (!wc->ssd_dev->dax_dev) {
		r = -EOPNOTSUPP;
		goto err1;
	}
	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	BUG();
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

#define writecache_error(wc, err, msg, arg...) \
do { \
	if (!cmpxchg(&(wc)->error, 0, err)) \
		DMERR(msg, ##arg); \
	wake_up(&(wc)->freelist_wait); \
} while (0)

#define writecache_has_error(wc) (unlikely(READ_ONCE((wc)->error)))

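/*
 * In SSD mode the metadata lives in a vmalloc'ed shadow of the on-disk
 * superblock, so modified ranges are tracked in dirty_bitmap (one bit per
 * BITMAP_GRANULARITY bytes) and written back on commit. In pmem mode the
 * stores are already persistent, so there is nothing to track.
 */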
static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

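/*
 * Write every dirty metadata range out to the SSD and then issue a disk
 * flush. Runs of consecutive dirty bits are coalesced into single
 * asynchronous dm-io writes; the io_notify completion counts them so the
 * caller only returns after all writes (and the flush) have finished.
 */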
static void ssd_commit_flushed(struct dm_writecache *wc)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void writecache_commit_flushed(struct dm_writecache *wc)
{
	if (WC_MODE_PMEM(wc))
		wmb();
	else
		ssd_commit_flushed(wc);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

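/*
 * Look up the cache entry for @block in the rb-tree. With
 * WFE_RETURN_FOLLOWING a miss returns the entry with the next higher
 * original sector instead of NULL (used for ranged operations such as
 * discard). Several entries may exist for the same sector while writeback
 * is in flight; WFE_LOWEST_SEQ selects the oldest of them, otherwise the
 * newest is returned.
 */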
static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		list_del(&e->lru);
	}
	wc->freelist_size--;
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

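/*
 * Commit all uncommitted entries. Entries whose seq_count is below
 * wc->seq_count are already durable; the rest are flushed, then the
 * superblock's seq_count is advanced and committed, which atomically
 * promotes them to committed state. Older duplicate entries for the same
 * sectors can then be freed.
 */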
static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc);

	if (!WC_MODE_PMEM(wc))
		writecache_wait_for_ios(wc, WRITE);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
	writecache_commit_flushed(wc);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc);
}

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				writecache_wait_for_ios(wc, READ);
				writecache_wait_for_ios(wc, WRITE);
				discarded_something = true;
			}
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);

	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
	}

	return 0;
}

static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	if (WC_MODE_PMEM(wc))
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc);
	}

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}

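/*
 * Copy one cache block between a bio's pages and persistent memory.
 * Reads go through memcpy_mcsafe() so a hardware memory error poisons the
 * device rather than crashing the kernel; writes use memcpy_flushcache()
 * so the data does not linger in the CPU cache.
 */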
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned long flags;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_irq(&bv, &flags);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = memcpy_mcsafe(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache(data, buf, size);
		}

		bvec_kunmap_irq(buf, &flags);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			generic_make_request(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}

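/*
 * The map function routes each bio: flushes and discards are handled
 * inline in pmem mode and punted to the flush thread in SSD mode; reads
 * are served from the cache on a hit and remapped to the origin on a
 * miss; writes always land in the cache, grabbing a free block if the
 * sector is not cached yet.
 */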
static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			goto unlock_submit;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e))
					goto bio_copy;
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					goto bio_copy;
				}
			}
			e = writecache_pop_from_freelist(wc);
			if (unlikely(!e)) {
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(bio->bi_opf & REQ_FUA ||
			     wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}

static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private != NULL) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}

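/*
 * Writeback completions arrive in interrupt context, so they are only
 * queued on endio_list here; the endio thread below does the real work
 * (freeing entries, committing metadata) under the writecache mutex.
 */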
static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}

static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					"write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

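/*
 * The endio thread drains endio_list in batches: it grabs the whole list
 * under the spinlock, issues a flush to the origin device (unless FUA
 * writes were used), and then frees the written-back entries.
 */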
static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc);

		wc_unlock(wc);
	}

	return 0;
}

static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);
	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

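/*
 * Optional back-pressure: if the writeback_jobs limit is set, stall here
 * until enough in-flight writeback completes before issuing more.
 */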
static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}

static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}

static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}

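/*
 * Writeback worker: pick entries from the cold end of the LRU (or walk
 * the tree in order when flushing everything), batch runs of blocks that
 * are contiguous on the origin into a single bio or kcopyd job, and keep
 * going until the freelist is back above the low watermark.
 */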
static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *f, *uninitialized_var(g), *e = NULL;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark)) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
			queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		if (unlikely(wc->writeback_all)) {
			if (unlikely(!e)) {
				writecache_flush(wc);
				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
			} else
				e = g;
		} else
			e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_del(&e->lru);
				list_add(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_del(&e->lru);
		list_add(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
				     read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_del(&g->lru);
			list_add(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_PAGES;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
				if (unlikely(wc->writeback_all)) {
					next_node = rb_next(&f->rb_node);
					if (likely(next_node))
						g = container_of(next_node, struct wc_entry, rb_node);
				}
				break;
			}
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't make any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}

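/*
 * Split the cache device into data blocks and the metadata area that
 * precedes them. Starting from the upper bound device_size /
 * (block_size + entry_size), n_blocks is decremented until the rounded-up
 * metadata area plus the data blocks fit. For example (illustrative
 * numbers): a 1 GiB device with 4096-byte blocks and 16-byte entries
 * converges to n_blocks = 261123 with a 1021-block metadata area, since
 * 1021 * 4096 + 261123 * 4096 is exactly 1 GiB.
 */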
static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify the following entries[n_blocks] won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check if the bit field overflows */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}

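/*
 * Format a fresh cache: zero the superblock fields, invalidate every
 * entry, and commit all of that before the magic number is written, so a
 * crash mid-format leaves the device looking uninitialized rather than
 * half-valid.
 */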
static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++)
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc);

	return 0;
}

static void writecache_dtr(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;

	if (!wc)
		return;

	if (wc->endio_thread)
		kthread_stop(wc->endio_thread);

	if (wc->flush_thread)
		kthread_stop(wc->flush_thread);

	bioset_exit(&wc->bio_set);

	mempool_exit(&wc->copy_pool);

	if (wc->writeback_wq)
		destroy_workqueue(wc->writeback_wq);

	if (wc->dev)
		dm_put_device(ti, wc->dev);

	if (wc->ssd_dev)
		dm_put_device(ti, wc->ssd_dev);

	if (wc->entries)
		vfree(wc->entries);

	if (wc->memory_map) {
		if (WC_MODE_PMEM(wc))
			persistent_memory_release(wc);
		else
			vfree(wc->memory_map);
	}

	if (wc->dm_kcopyd)
		dm_kcopyd_client_destroy(wc->dm_kcopyd);

	if (wc->dm_io)
		dm_io_client_destroy(wc->dm_io);

	if (wc->dirty_bitmap)
		vfree(wc->dirty_bitmap);

	kfree(wc);
}

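/*
 * Constructor. Target line format (as parsed below):
 *   writecache <p|s> <origin dev> <cache dev> <block size> <#opt args> [<arg>...]
 * with optional args start_sector, high_watermark, low_watermark,
 * writeback_jobs, autocommit_blocks, autocommit_time, fua and nofua.
 * An illustrative dmsetup table line (device names and sizes are examples
 * only):
 *   0 209715200 writecache s /dev/vg/origin /dev/ssd 4096 4 high_watermark 50 low_watermark 45
 */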
1825static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
1826{
1827 struct dm_writecache *wc;
1828 struct dm_arg_set as;
1829 const char *string;
1830 unsigned opt_params;
1831 size_t offset, data_size;
1832 int i, r;
1833 char dummy;
1834 int high_wm_percent = HIGH_WATERMARK;
1835 int low_wm_percent = LOW_WATERMARK;
1836 uint64_t x;
1837 struct wc_memory_superblock s;
1838
1839 static struct dm_arg _args[] = {
1840 {0, 10, "Invalid number of feature args"},
1841 };
1842
1843 as.argc = argc;
1844 as.argv = argv;
1845
1846 wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
1847 if (!wc) {
1848 ti->error = "Cannot allocate writecache structure";
1849 r = -ENOMEM;
1850 goto bad;
1851 }
1852 ti->private = wc;
1853 wc->ti = ti;
1854
1855 mutex_init(&wc->lock);
1856 writecache_poison_lists(wc);
1857 init_waitqueue_head(&wc->freelist_wait);
1858 timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
1859
1860 for (i = 0; i < 2; i++) {
1861 atomic_set(&wc->bio_in_progress[i], 0);
1862 init_waitqueue_head(&wc->bio_in_progress_wait[i]);
1863 }
1864
1865 wc->dm_io = dm_io_client_create();
1866 if (IS_ERR(wc->dm_io)) {
1867 r = PTR_ERR(wc->dm_io);
1868 ti->error = "Unable to allocate dm-io client";
1869 wc->dm_io = NULL;
1870 goto bad;
1871 }
1872
f87e033b 1873 wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
48debafe
MP
1874 if (!wc->writeback_wq) {
1875 r = -ENOMEM;
1876 ti->error = "Could not allocate writeback workqueue";
1877 goto bad;
1878 }
1879 INIT_WORK(&wc->writeback_work, writecache_writeback);
1880 INIT_WORK(&wc->flush_work, writecache_flush_work);
1881
1882 raw_spin_lock_init(&wc->endio_list_lock);
1883 INIT_LIST_HEAD(&wc->endio_list);
1884 wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
1885 if (IS_ERR(wc->endio_thread)) {
1886 r = PTR_ERR(wc->endio_thread);
1887 wc->endio_thread = NULL;
1888 ti->error = "Couldn't spawn endio thread";
1889 goto bad;
1890 }
1891 wake_up_process(wc->endio_thread);
1892
1893 /*
1894 * Parse the mode (pmem or ssd)
1895 */
1896 string = dm_shift_arg(&as);
1897 if (!string)
1898 goto bad_arguments;
1899
1900 if (!strcasecmp(string, "s")) {
1901 wc->pmem_mode = false;
1902 } else if (!strcasecmp(string, "p")) {
1903#ifdef DM_WRITECACHE_HAS_PMEM
1904 wc->pmem_mode = true;
1905 wc->writeback_fua = true;
1906#else
1907 /*
1908 * If the architecture doesn't support persistent memory or
1909 * the kernel doesn't support any DAX drivers, this driver can
1910 * only be used in SSD-only mode.
1911 */
1912 r = -EOPNOTSUPP;
1913 ti->error = "Persistent memory or DAX not supported on this system";
1914 goto bad;
1915#endif
1916 } else {
1917 goto bad_arguments;
1918 }
1919
1920 if (WC_MODE_PMEM(wc)) {
1921 r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
1922 offsetof(struct writeback_struct, bio),
1923 BIOSET_NEED_BVECS);
1924 if (r) {
1925 ti->error = "Could not allocate bio set";
1926 goto bad;
1927 }
1928 } else {
1929 r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
1930 if (r) {
1931 ti->error = "Could not allocate mempool";
1932 goto bad;
1933 }
1934 }
1935
1936 /*
1937 * Parse the origin data device
1938 */
1939 string = dm_shift_arg(&as);
1940 if (!string)
1941 goto bad_arguments;
1942 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
1943 if (r) {
1944 ti->error = "Origin data device lookup failed";
1945 goto bad;
1946 }
1947
1948 /*
1949 * Parse cache data device (be it pmem or ssd)
1950 */
1951 string = dm_shift_arg(&as);
1952 if (!string)
1953 goto bad_arguments;
1954
1955 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
1956 if (r) {
1957 ti->error = "Cache data device lookup failed";
1958 goto bad;
1959 }
1960 wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
1961
48debafe
MP
1962 /*
1963 * Parse the cache block size
1964 */
1965 string = dm_shift_arg(&as);
1966 if (!string)
1967 goto bad_arguments;
1968 if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
1969 wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
1970 (wc->block_size & (wc->block_size - 1))) {
1971 r = -EINVAL;
1972 ti->error = "Invalid block size";
1973 goto bad;
1974 }
1975 wc->block_size_bits = __ffs(wc->block_size);
1976
1977 wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
1978 wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
1979 wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);
1980
1981 /*
1982 * Parse optional arguments
1983 */
1984 r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
1985 if (r)
1986 goto bad;
1987
1988 while (opt_params) {
1989 string = dm_shift_arg(&as), opt_params--;
d284f824
MP
1990 if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
1991 unsigned long long start_sector;
1992 string = dm_shift_arg(&as), opt_params--;
1993 if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
1994 goto invalid_optional;
1995 wc->start_sector = start_sector;
1996 if (wc->start_sector != start_sector ||
1997 wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
1998 goto invalid_optional;
1999 } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
48debafe
MP
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (high_wm_percent < 0 || high_wm_percent > 100)
				goto invalid_optional;
			wc->high_wm_percent_set = true;
		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (low_wm_percent < 0 || low_wm_percent > 100)
				goto invalid_optional;
			wc->low_wm_percent_set = true;
		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
				goto invalid_optional;
			wc->max_writeback_jobs_set = true;
		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
				goto invalid_optional;
			wc->autocommit_blocks_set = true;
		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
			unsigned autocommit_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
				goto invalid_optional;
			if (autocommit_msecs > 3600000)
				goto invalid_optional;
			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
			wc->autocommit_time_set = true;
		} else if (!strcasecmp(string, "fua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = true;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "nofua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = false;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else {
invalid_optional:
			r = -EINVAL;
			ti->error = "Invalid optional argument";
			goto bad;
		}
	}

	if (high_wm_percent < low_wm_percent) {
		r = -EINVAL;
		ti->error = "High watermark must be greater than or equal to low watermark";
		goto bad;
	}

	if (WC_MODE_PMEM(wc)) {
		r = persistent_memory_claim(wc);
		if (r) {
			ti->error = "Unable to map persistent memory for cache";
			goto bad;
		}
	} else {
		struct dm_io_region region;
		struct dm_io_request req;
		size_t n_blocks, n_metadata_blocks;
		uint64_t n_bitmap_bits;

		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;

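		/*
		 * In SSD mode, REQ_PREFLUSH and discard bios are handed off
		 * (by the map path elsewhere in this target) to a dedicated
		 * kthread that services them against the cache device.
		 */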
		bio_list_init(&wc->flush_list);
		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
		if (IS_ERR(wc->flush_thread)) {
			r = PTR_ERR(wc->flush_thread);
			wc->flush_thread = NULL;
			ti->error = "Couldn't spawn flush thread";
			goto bad;
		}
		wake_up_process(wc->flush_thread);

		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
					  &n_blocks, &n_metadata_blocks);
		if (r) {
			ti->error = "Invalid device size";
			goto bad;
		}

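		/*
		 * One bit of the dirty bitmap covers BITMAP_GRANULARITY bytes
		 * of the metadata shadow (rounded up so every byte is
		 * covered); the bitmap records which metadata regions still
		 * need to be written back to the SSD on commit.
		 */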
		n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
				 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
		/* this is a limitation of the test_bit functions */
		if (n_bitmap_bits > 1U << 31) {
			r = -EFBIG;
			ti->error = "Invalid device size";
			goto bad;
		}

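		/*
		 * Only the metadata (superblock plus entries) is shadowed in
		 * ordinary RAM here; the data blocks themselves stay on the
		 * SSD and are never mapped.
		 */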
		wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
		if (!wc->memory_map) {
			r = -ENOMEM;
			ti->error = "Unable to allocate memory for metadata";
			goto bad;
		}

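		/*
		 * kcopyd performs the cache-to-origin block copies used by
		 * the SSD-mode writeback path elsewhere in this file.
		 */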
		wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
		if (IS_ERR(wc->dm_kcopyd)) {
			r = PTR_ERR(wc->dm_kcopyd);
			ti->error = "Unable to allocate dm-kcopyd client";
			wc->dm_kcopyd = NULL;
			goto bad;
		}

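		/*
		 * Size the bitmap in whole unsigned longs so the bitops can
		 * operate on it directly; e.g. 100 bits round up to 2 longs
		 * (16 bytes) on a 64-bit build.
		 */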
		wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
		wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
			BITS_PER_LONG * sizeof(unsigned long);
		wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
		if (!wc->dirty_bitmap) {
			r = -ENOMEM;
			ti->error = "Unable to allocate dirty bitmap";
			goto bad;
		}

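		/*
		 * Populate the RAM shadow: read the metadata area from the
		 * SSD synchronously (a NULL notify.fn makes dm_io() wait for
		 * completion before returning).
		 */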
		region.bdev = wc->ssd_dev->bdev;
		region.sector = wc->start_sector;
		region.count = wc->metadata_sectors;
		req.bi_op = REQ_OP_READ;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map;
		req.client = wc->dm_io;
		req.notify.fn = NULL;

		r = dm_io(&req, 1, &region, NULL);
		if (r) {
			ti->error = "Unable to read metadata";
			goto bad;
		}
	}

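	/*
	 * Copy the superblock out of the (possibly persistent) memory map.
	 * With DM_WRITECACHE_HANDLE_HARDWARE_ERRORS, memcpy_mcsafe() survives
	 * machine-check exceptions on bad pmem; otherwise it degrades to a
	 * plain copy that always returns 0.
	 */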
	r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
	if (r) {
		ti->error = "Hardware memory error when reading superblock";
		goto bad;
	}
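	/*
	 * An all-zero magic and version means the cache device has never
	 * been formatted: initialize the memory layout, then re-read the
	 * superblock.
	 */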
	if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
		r = init_memory(wc);
		if (r) {
			ti->error = "Unable to initialize device";
			goto bad;
		}
		r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
		if (r) {
			ti->error = "Hardware memory error when reading superblock";
			goto bad;
		}
	}

	if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
		ti->error = "Invalid magic in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
		ti->error = "Invalid version in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.block_size) != wc->block_size) {
		ti->error = "Block size does not match superblock";
		r = -EINVAL;
		goto bad;
	}

	wc->n_blocks = le64_to_cpu(s.n_blocks);

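	/*
	 * Cache layout: superblock, then entries[n_blocks], padded up to a
	 * block boundary, then n_blocks data blocks. Each step of the size
	 * computation is checked for overflow because n_blocks comes from
	 * on-disk metadata that cannot be trusted.
	 */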
	offset = wc->n_blocks * sizeof(struct wc_memory_entry);
	if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
overflow:
		ti->error = "Overflow in size calculation";
		r = -EINVAL;
		goto bad;
	}
	offset += sizeof(struct wc_memory_superblock);
	if (offset < sizeof(struct wc_memory_superblock))
		goto overflow;
	offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
	data_size = wc->n_blocks * (size_t)wc->block_size;
	if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
	    (offset + data_size < offset))
		goto overflow;
	if (offset + data_size > wc->memory_map_size) {
		ti->error = "Memory area is too small";
		r = -EINVAL;
		goto bad;
	}

	wc->metadata_sectors = offset >> SECTOR_SHIFT;
	wc->block_start = (char *)sb(wc) + offset;

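	/*
	 * Convert the "percent used" watermarks into free-block counts,
	 * rounded to nearest (+50 before the division by 100). E.g. with
	 * n_blocks == 1000 and high_wm_percent == 50,
	 * freelist_high_watermark becomes 500: per the freelist checks
	 * elsewhere in this target, writeback kicks in once fewer than 500
	 * blocks remain free.
	 */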
	x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_high_watermark = x;
	x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_low_watermark = x;

	r = writecache_alloc_entries(wc);
	if (r) {
		ti->error = "Cannot allocate memory";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	return 0;

bad_arguments:
	r = -EINVAL;
	ti->error = "Bad arguments";
bad:
	writecache_dtr(ti);
	return r;
}
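/*
 * Example (illustrative device paths, not taken from this file): an
 * SSD-backed cache with 4096-byte blocks and custom watermarks could be
 * created with
 *
 *   dmsetup create wc --table "0 <origin sectors> writecache s \
 *       /dev/mapper/origin /dev/ssd 4096 4 high_watermark 60 low_watermark 50"
 *
 * matching the positional and optional arguments parsed by writecache_ctr()
 * above.
 */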

static void writecache_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_writecache *wc = ti->private;
	unsigned extra_args;
	unsigned sz = 0;
	uint64_t x;

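	/*
	 * STATUSTYPE_INFO reports "<error> <n_blocks> <free blocks>
	 * <blocks under writeback>"; STATUSTYPE_TABLE re-emits the
	 * constructor arguments, converting the stored free-block
	 * watermarks back into percentages.
	 */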
	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
		       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
		       (unsigned long long)wc->writeback_size);
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
		       wc->dev->name, wc->ssd_dev->name, wc->block_size);
		extra_args = 0;
		if (wc->start_sector)
			extra_args += 2;
		if (wc->high_wm_percent_set)
			extra_args += 2;
		if (wc->low_wm_percent_set)
			extra_args += 2;
		if (wc->max_writeback_jobs_set)
			extra_args += 2;
		if (wc->autocommit_blocks_set)
			extra_args += 2;
		if (wc->autocommit_time_set)
			extra_args += 2;
		if (wc->writeback_fua_set)
			extra_args++;

		DMEMIT("%u", extra_args);
		if (wc->start_sector)
			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
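		/*
		 * Undo the ctr-time conversion: turn the stored free-block
		 * watermarks back into "percent used", rounding to nearest,
		 * so the reported table matches what was loaded.
		 */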
		if (wc->high_wm_percent_set) {
			x = (uint64_t)wc->freelist_high_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" high_watermark %u", 100 - (unsigned)x);
		}
		if (wc->low_wm_percent_set) {
			x = (uint64_t)wc->freelist_low_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" low_watermark %u", 100 - (unsigned)x);
		}
		if (wc->max_writeback_jobs_set)
			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
		if (wc->autocommit_blocks_set)
			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
		if (wc->autocommit_time_set)
			DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
		if (wc->writeback_fua_set)
			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
		break;
	}
}

static struct target_type writecache_target = {
	.name = "writecache",
	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.ctr = writecache_ctr,
	.dtr = writecache_dtr,
	.status = writecache_status,
	.postsuspend = writecache_suspend,
	.resume = writecache_resume,
	.message = writecache_message,
	.map = writecache_map,
	.end_io = writecache_end_io,
	.iterate_devices = writecache_iterate_devices,
	.io_hints = writecache_io_hints,
};

static int __init dm_writecache_init(void)
{
	int r;

	r = dm_register_target(&writecache_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_writecache_exit(void)
{
	dm_unregister_target(&writecache_target);
}

module_init(dm_writecache_init);
module_exit(dm_writecache_exit);

MODULE_DESCRIPTION(DM_NAME " writecache target");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");