// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

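/*
 * Default tunables.  HIGH_WATERMARK/LOW_WATERMARK are percentages of
 * cache blocks: writeback is queued when the free list shrinks to the
 * derived high-watermark threshold (see writecache_verify_watermark()
 * below).  The AUTOCOMMIT_* values bound how many uncommitted blocks,
 * or how much time, may accumulate before an implicit commit.  These
 * are defaults only; the constructor (not shown in this excerpt) can
 * override them via optional table arguments.
 *
 * Illustrative invocation (a sketch; device names and sizes are
 * placeholders, not taken from this file):
 *
 *	dmsetup create wc --table "0 <origin_sectors> writecache s \
 *		/dev/<origin> /dev/<cache> 4096 \
 *		4 high_watermark 50 low_watermark 45"
 */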
#define HIGH_WATERMARK			50
#define LOW_WATERMARK			45
#define MAX_WRITEBACK_JOBS		0
#define ENDIO_LATENCY			16
#define WRITEBACK_LATENCY		64
#define AUTOCOMMIT_BLOCKS_SSD		65536
#define AUTOCOMMIT_BLOCKS_PMEM		64
#define AUTOCOMMIT_MSEC			1000
#define MAX_AGE_DIV			16
#define MAX_AGE_UNSPECIFIED		-1UL

#define BITMAP_GRANULARITY	65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY	PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

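/*
 * pmem_assign() stores a value into persistent memory and flushes the
 * CPU cache so the store is durable.  The temporary guarantees that the
 * source expression is fully evaluated before memcpy_flushcache()
 * writes it out in one shot.  Without a pmem API the macro degrades to
 * a plain assignment.
 */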
#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src)	((dest) = (src))
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC		0x23489321
#define MEMORY_SUPERBLOCK_VERSION	1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[];
};

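/*
 * In-core descriptor of one cache block.  On 64-bit kernels the
 * write_in_progress flag and the block index are packed into a single
 * word via bitfields; 47 bits of index are ample for any realistic
 * cache size.  When hardware-error handling is enabled, the original
 * sector and sequence count are shadowed in ordinary RAM so they can be
 * read without touching possibly-failing persistent memory.
 */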
struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
	unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)			((wc)->pmem_mode)
#define WC_MODE_FUA(wc)				((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)			false
#define WC_MODE_FUA(wc)				false
#endif
#define WC_MODE_SORT_FREELIST(wc)		(!WC_MODE_PMEM(wc))

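/*
 * Per-target state.  "lru" holds cached entries ordered by age, "tree"
 * indexes them by original sector, and free blocks live either on a
 * plain list (pmem mode) or on an rb-tree sorted by cache address (SSD
 * mode, so that adjacent blocks can be popped for sequential writes).
 * Everything is serialized by "lock" except the endio list, which has
 * its own raw spinlock because it is touched from interrupt context.
 */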
struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;
	unsigned long max_age;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	struct timer_list max_age_timer;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	sector_t data_device_sectors;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool start_sector_set:1;
	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool max_age_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;
	bool cleaner:1;
	bool cleaner_set:1;
	bool metadata_only:1;

	unsigned high_wm_percent_value;
	unsigned low_wm_percent_value;
	unsigned autocommit_time_value;
	unsigned max_age_value;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE		16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					"A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
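/*
 * Map the whole cache device through DAX.  dax_direct_access() may hand
 * back fewer pages than requested; in that case the mapping is stitched
 * together page by page and vmap()ed into one contiguous virtual range.
 * The start_sector offset is applied afterwards so memory_map always
 * points at the superblock.
 */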
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;
	sector_t offset;

	wc->memory_vmapped = false;

	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	offset = get_start_sect(wc->ssd_dev->bdev);
	if (offset & (PAGE_SIZE / 512 - 1)) {
		r = -EINVAL;
		goto err1;
	}
	offset >>= PAGE_SHIFT - 9;

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
				if (!(i & 15))
					cond_resched();
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	return -EOPNOTSUPP;
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

#define writecache_error(wc, err, msg, arg...)				\
do {									\
	if (!cmpxchg(&(wc)->error, 0, err))				\
		DMERR(msg, ##arg);					\
	wake_up(&(wc)->freelist_wait);					\
} while (0)

#define writecache_has_error(wc)	(unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

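/*
 * In SSD mode metadata is shadowed in RAM and written back region by
 * region: walk the dirty bitmap, issue one asynchronous dm-io write per
 * dirty run, wait for them all, then send a flush so the metadata is on
 * stable storage before the bitmap is cleared.
 */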
static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	if (wait_for_ios)
		writecache_wait_for_ios(wc, WRITE);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void ssd_commit_superblock(struct dm_writecache *wc)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = 0;
	region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT;

	if (unlikely(region.sector + region.count > wc->metadata_sectors))
		region.count = wc->metadata_sectors - region.sector;

	region.sector += wc->start_sector;

	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_SYNC | REQ_FUA;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;
	req.notify.context = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error writing superblock");
}

static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	if (WC_MODE_PMEM(wc))
		pmem_wmb();
	else
		ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

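/*
 * Look up the cache entry for "block" in the rb-tree.  With
 * WFE_RETURN_FOLLOWING a miss returns the entry with the next higher
 * original sector instead of NULL (used for ranged operations such as
 * discard).  Duplicate entries for the same sector can coexist briefly;
 * WFE_LOWEST_SEQ selects the oldest of them, otherwise the newest wins.
 */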
static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
	ins->age = jiffies;
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);
}

static void writecache_max_age_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, max_age_timer);

	if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
		queue_work(wc->writeback_wq, &wc->writeback_work);
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
	}
}

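/*
 * Take an entry off the free list.  In SSD mode the free tree is sorted
 * by cache address and expected_sector lets the caller ask for the
 * block immediately following the previous one, so multi-block writes
 * stay physically contiguous on the caching device; pass (sector_t)-1
 * to accept any block.
 */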
static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		list_del(&e->lru);
	}
	wc->freelist_size--;

	writecache_verify_watermark(wc);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

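/*
 * Commit all uncommitted entries: flush their metadata (and data in
 * pmem mode), advance seq_count, persist the superblock, then free any
 * older duplicate entries that the new commit has superseded.  Entries
 * become "committed" by comparison against seq_count, so a single
 * superblock write publishes the whole batch atomically.
 */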
static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc, true);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	if (WC_MODE_PMEM(wc))
		writecache_commit_flushed(wc, false);
	else
		ssd_commit_superblock(wc);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc, false);
}

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				if (!WC_MODE_PMEM(wc)) {
					writecache_wait_for_ios(wc, READ);
					writecache_wait_for_ios(wc, WRITE);
				}
				discarded_something = true;
			}
			if (!writecache_entry_is_committed(wc, e))
				wc->uncommitted_blocks--;
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);
	del_timer_sync(&wc->max_age_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	drain_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);

	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
		cond_resched();
	}

	return 0;
}

static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
{
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = wc->start_sector;
	region.count = n_sectors;
	req.bi_op = REQ_OP_READ;
	req.bi_op_flags = REQ_SYNC;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	return dm_io(&req, 1, &region, NULL);
}

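/*
 * Rebuild the in-core state from the on-media metadata on resume: read
 * (or, in pmem mode, invalidate caches over) the metadata, then walk
 * every slot.  Uncommitted slots are wiped and freed; committed slots
 * are inserted into the tree, and when two slots claim the same
 * original sector the one with the higher sequence count wins.
 * copy_mc_to_kernel() is used so a machine check while reading
 * persistent memory degrades into an error status instead of a crash.
 */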
static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	wc->data_device_sectors = bdev_nr_sectors(wc->dev->bdev);

	if (WC_MODE_PMEM(wc)) {
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
	} else {
		r = writecache_read_metadata(wc, wc->metadata_sectors);
		if (r) {
			size_t sb_entries_offset;
			writecache_error(wc, r, "unable to read metadata: %d", r);
			sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
			memset((char *)wc->memory_map + sb_entries_offset, -1,
			       (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
		}
	}

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = copy_mc_to_kernel(&sb_seq_count, &sb(wc)->seq_count,
			      sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = copy_mc_to_kernel(&wme, memory_entry(wc, e),
				      sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
		cond_resched();
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc, false);
	}

	writecache_verify_watermark(wc);

	if (wc->max_age != MAX_AGE_UNSPECIFIED)
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static void activate_cleaner(struct dm_writecache *wc)
{
	wc->flush_on_suspend = true;
	wc->cleaner = true;
	wc->freelist_high_watermark = wc->n_blocks;
	wc->freelist_low_watermark = wc->n_blocks;
}

static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	activate_cleaner(wc);
	if (!dm_suspended(wc->ti))
		writecache_verify_watermark(wc);
	wc_unlock(wc);

	return 0;
}

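/*
 * Message interface.  A sketch of the expected usage ("wc" is a
 * placeholder device name):
 *
 *	dmsetup message wc 0 flush		- commit and write back now
 *	dmsetup message wc 0 flush_on_suspend	- write back on next suspend
 *	dmsetup message wc 0 cleaner		- drain the cache entirely
 */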
static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "cleaner"))
		r = process_cleaner_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}

static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
{
	/*
	 * clflushopt performs better with block size 1024, 2048, 4096
	 * non-temporal stores perform better with block size 512
	 *
	 * block size	512		1024		2048		4096
	 * movnti	496 MB/s	642 MB/s	725 MB/s	744 MB/s
	 * clflushopt	373 MB/s	688 MB/s	1.1 GB/s	1.2 GB/s
	 *
	 * We see that movnti performs better for 512-byte blocks, and
	 * clflushopt performs better for 1024-byte and larger blocks. So, we
	 * prefer clflushopt for sizes >= 768.
	 *
	 * NOTE: this happens to be the case now (with dm-writecache's single
	 * threaded model) but re-evaluate this once memcpy_flushcache() is
	 * enabled to use movdir64b which might invalidate this performance
	 * advantage seen with cache-allocating-writes plus flushing.
	 */
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
	    likely(boot_cpu_data.x86_clflush_size == 64) &&
	    likely(size >= 768)) {
		do {
			memcpy((void *)dest, (void *)source, 64);
			clflushopt((void *)dest);
			dest += 64;
			source += 64;
			size -= 64;
		} while (size >= 64);
		return;
	}
#endif
	memcpy_flushcache(dest, source, size);
}

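/*
 * Copy one cache block between a bio and persistent memory, one bvec at
 * a time.  Reads go through copy_mc_to_kernel() so a hardware memory
 * error is reported as BLK_STS_IOERR rather than taking the machine
 * down; writes use the flushing memcpy so the data is durable.
 */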
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned long flags;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_irq(&bv, &flags);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = copy_mc_to_kernel(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache_optimized(data, buf, size);
		}

		bvec_kunmap_irq(buf, &flags);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			submit_bio_noacct(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}

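/*
 * The map function.  Flushes and discards are either handled inline
 * (pmem) or offloaded to the flush thread (SSD).  Reads are served from
 * the cache block by block when they hit and remapped to the origin
 * when they miss.  Writes allocate free cache blocks (extending
 * contiguously in SSD mode), falling back to a direct write to the
 * origin when the cache is exhausted and the block was never cached.
 */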
static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			if (unlikely(wc->cleaner) || unlikely(wc->metadata_only))
				goto unlock_remap_origin;
			goto unlock_submit;
		} else {
			if (dm_bio_get_target_bio_nr(bio))
				goto unlock_remap_origin;
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			bool found_entry = false;
			bool search_used = false;
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e)) {
					search_used = true;
					goto bio_copy;
				}
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					search_used = true;
					goto bio_copy;
				}
				found_entry = true;
			} else {
				if (unlikely(wc->cleaner) ||
				    (wc->metadata_only && !(bio->bi_opf & REQ_META)))
					goto direct_write;
			}
			e = writecache_pop_from_freelist(wc, (sector_t)-1);
			if (unlikely(!e)) {
				if (!WC_MODE_PMEM(wc) && !found_entry) {
direct_write:
					e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
					if (e) {
						sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
						BUG_ON(!next_boundary);
						if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
							dm_accept_partial_bio(bio, next_boundary);
						}
					}
					goto unlock_remap_origin;
				}
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				unsigned bio_size = wc->block_size;
				sector_t start_cache_sec = cache_sector(wc, e);
				sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);

				while (bio_size < bio->bi_iter.bi_size) {
					if (!search_used) {
						struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
						if (!f)
							break;
						write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
										(bio_size >> SECTOR_SHIFT), wc->seq_count);
						writecache_insert_entry(wc, f);
						wc->uncommitted_blocks++;
					} else {
						struct wc_entry *f;
						struct rb_node *next = rb_next(&e->rb_node);
						if (!next)
							break;
						f = container_of(next, struct wc_entry, rb_node);
						if (f != e + 1)
							break;
						if (read_original_sector(wc, f) !=
						    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
							break;
						if (unlikely(f->write_in_progress))
							break;
						if (writecache_entry_is_committed(wc, f))
							wc->overwrote_committed = true;
						e = f;
					}
					bio_size += wc->block_size;
					current_cache_sec += wc->block_size >> SECTOR_SHIFT;
				}

				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = start_cache_sec;
				dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);

				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(bio->bi_opf & REQ_FUA ||
			     wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}

static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private != NULL) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}


static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}

static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					"write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc, false);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

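/*
 * The endio thread batches completions: it grabs the whole endio list
 * under the raw spinlock, issues a single disk flush for the batch
 * (unless FUA writeback already made the data durable), and only then
 * returns the written-back blocks to the free list.
 */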
1625 | static int writecache_endio_thread(void *data) | |
1626 | { | |
1627 | struct dm_writecache *wc = data; | |
1628 | ||
1629 | while (1) { | |
1630 | struct list_head list; | |
1631 | ||
1632 | raw_spin_lock_irq(&wc->endio_list_lock); | |
1633 | if (!list_empty(&wc->endio_list)) | |
1634 | goto pop_from_list; | |
1635 | set_current_state(TASK_INTERRUPTIBLE); | |
1636 | raw_spin_unlock_irq(&wc->endio_list_lock); | |
1637 | ||
1638 | if (unlikely(kthread_should_stop())) { | |
1639 | set_current_state(TASK_RUNNING); | |
1640 | break; | |
1641 | } | |
1642 | ||
1643 | schedule(); | |
1644 | ||
1645 | continue; | |
1646 | ||
1647 | pop_from_list: | |
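/*
 * Take the list private: copy the head to the stack, re-point the
 * first and last nodes at the copy and reinitialize the shared head.
 */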
1648 | list = wc->endio_list; | |
1649 | list.next->prev = list.prev->next = &list; | |
1650 | INIT_LIST_HEAD(&wc->endio_list); | |
1651 | raw_spin_unlock_irq(&wc->endio_list_lock); | |
1652 | ||
1653 | if (!WC_MODE_FUA(wc)) | |
1654 | writecache_disk_flush(wc, wc->dev); | |
1655 | ||
1656 | wc_lock(wc); | |
1657 | ||
1658 | if (WC_MODE_PMEM(wc)) { | |
1659 | __writecache_endio_pmem(wc, &list); | |
1660 | } else { | |
1661 | __writecache_endio_ssd(wc, &list); | |
1662 | writecache_wait_for_ios(wc, READ); | |
1663 | } | |
1664 | ||
aa950920 | 1665 | writecache_commit_flushed(wc, false); |
48debafe MP |
1666 | |
1667 | wc_unlock(wc); | |
1668 | } | |
1669 | ||
1670 | return 0; | |
1671 | } | |
1672 | ||
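/*
 * Add one cache block to a writeback bio: flush the block out of the
 * CPU cache and append its persistent-memory page to the bio. A block
 * starting at or beyond the end of the origin device is reported as
 * added without growing the bio; the resulting zero-sector bio is
 * later completed without being submitted.
 */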
620cbe40 | 1673 | static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e) |
48debafe MP |
1674 | { |
1675 | struct dm_writecache *wc = wb->wc; | |
1676 | unsigned block_size = wc->block_size; | |
1677 | void *address = memory_data(wc, e); | |
1678 | ||
1679 | persistent_memory_flush_cache(address, block_size); | |
4134455f MP |
1680 | |
1681 | if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors)) | |
1682 | return true; | |
1683 | ||
48debafe MP |
1684 | return bio_add_page(&wb->bio, persistent_memory_page(address), |
1685 | block_size, persistent_memory_page_offset(address)) != 0; | |
1686 | } | |
1687 | ||
1688 | struct writeback_list { | |
1689 | struct list_head list; | |
1690 | size_t size; | |
1691 | }; | |
1692 | ||
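/*
 * If a writeback_jobs limit is configured, wait until the number of
 * blocks under writeback (not counting those still queued on the local
 * list) drops below the limit.
 */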
1693 | static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl) | |
1694 | { | |
1695 | if (unlikely(wc->max_writeback_jobs)) { | |
1696 | if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) { | |
1697 | wc_lock(wc); | |
1698 | while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs) | |
1699 | writecache_wait_on_freelist(wc); | |
1700 | wc_unlock(wc); | |
1701 | } | |
1702 | } | |
1703 | cond_resched(); | |
1704 | } | |
1705 | ||
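/*
 * Write back from persistent memory: pop runs of entries with adjacent
 * original sectors off the tail of the list and turn each run into one
 * bio of up to wc_list_contiguous pages, submitted with FUA when
 * writeback_fua is set.
 */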
1706 | static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl) | |
1707 | { | |
1708 | struct wc_entry *e, *f; | |
1709 | struct bio *bio; | |
1710 | struct writeback_struct *wb; | |
1711 | unsigned max_pages; | |
1712 | ||
1713 | while (wbl->size) { | |
1714 | wbl->size--; | |
1715 | e = container_of(wbl->list.prev, struct wc_entry, lru); | |
1716 | list_del(&e->lru); | |
1717 | ||
1718 | max_pages = e->wc_list_contiguous; | |
1719 | ||
1720 | bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set); | |
1721 | wb = container_of(bio, struct writeback_struct, bio); | |
1722 | wb->wc = wc; | |
09f2d656 HY |
1723 | bio->bi_end_io = writecache_writeback_endio; |
1724 | bio_set_dev(bio, wc->dev->bdev); | |
1725 | bio->bi_iter.bi_sector = read_original_sector(wc, e); | |
48debafe | 1726 | if (max_pages <= WB_LIST_INLINE || |
50a7d3ba KC |
1727 | unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *), |
1728 | GFP_NOIO | __GFP_NORETRY | | |
1729 | __GFP_NOMEMALLOC | __GFP_NOWARN)))) { | |
48debafe MP |
1730 | wb->wc_list = wb->wc_list_inline; |
1731 | max_pages = WB_LIST_INLINE; | |
1732 | } | |
1733 | ||
620cbe40 | 1734 | BUG_ON(!wc_add_block(wb, e)); |
48debafe MP |
1735 | |
1736 | wb->wc_list[0] = e; | |
1737 | wb->wc_list_n = 1; | |
1738 | ||
1739 | while (wbl->size && wb->wc_list_n < max_pages) { | |
1740 | f = container_of(wbl->list.prev, struct wc_entry, lru); | |
1741 | if (read_original_sector(wc, f) != | |
1742 | read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT)) | |
1743 | break; | |
620cbe40 | 1744 | if (!wc_add_block(wb, f)) |
48debafe MP |
1745 | break; |
1746 | wbl->size--; | |
1747 | list_del(&f->lru); | |
1748 | wb->wc_list[wb->wc_list_n++] = f; | |
1749 | e = f; | |
1750 | } | |
09f2d656 | 1751 | bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA); |
48debafe MP |
1752 | if (writecache_has_error(wc)) { |
1753 | bio->bi_status = BLK_STS_IOERR; | |
09f2d656 | 1754 | bio_endio(bio); |
4134455f MP |
1755 | } else if (unlikely(!bio_sectors(bio))) { |
1756 | bio->bi_status = BLK_STS_OK; | |
1757 | bio_endio(bio); | |
48debafe | 1758 | } else { |
09f2d656 | 1759 | submit_bio(bio); |
48debafe MP |
1760 | } |
1761 | ||
1762 | __writeback_throttle(wc, wbl); | |
1763 | } | |
1764 | } | |
1765 | ||
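/*
 * Write back from the SSD: entries in a run are also physically
 * consecutive on the cache device (the BUG_ON(f != e + 1) below relies
 * on this), so each run becomes a single dm-kcopyd copy from the cache
 * to the origin, clamped to the end of the origin device.
 */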
1766 | static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl) | |
1767 | { | |
1768 | struct wc_entry *e, *f; | |
1769 | struct dm_io_region from, to; | |
1770 | struct copy_struct *c; | |
1771 | ||
1772 | while (wbl->size) { | |
1773 | unsigned n_sectors; | |
1774 | ||
1775 | wbl->size--; | |
1776 | e = container_of(wbl->list.prev, struct wc_entry, lru); | |
1777 | list_del(&e->lru); | |
1778 | ||
1779 | n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT); | |
1780 | ||
1781 | from.bdev = wc->ssd_dev->bdev; | |
1782 | from.sector = cache_sector(wc, e); | |
1783 | from.count = n_sectors; | |
1784 | to.bdev = wc->dev->bdev; | |
1785 | to.sector = read_original_sector(wc, e); | |
1786 | to.count = n_sectors; | |
1787 | ||
1788 | c = mempool_alloc(&wc->copy_pool, GFP_NOIO); | |
1789 | c->wc = wc; | |
1790 | c->e = e; | |
1791 | c->n_entries = e->wc_list_contiguous; | |
1792 | ||
1793 | while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) { | |
1794 | wbl->size--; | |
1795 | f = container_of(wbl->list.prev, struct wc_entry, lru); | |
1796 | BUG_ON(f != e + 1); | |
1797 | list_del(&f->lru); | |
1798 | e = f; | |
1799 | } | |
1800 | ||
4134455f MP |
1801 | if (unlikely(to.sector + to.count > wc->data_device_sectors)) { |
1802 | if (to.sector >= wc->data_device_sectors) { | |
1803 | writecache_copy_endio(0, 0, c); | |
1804 | continue; | |
1805 | } | |
1806 | from.count = to.count = wc->data_device_sectors - to.sector; | |
1807 | } | |
1808 | ||
48debafe MP |
1809 | dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c); |
1810 | ||
1811 | __writeback_throttle(wc, wbl); | |
1812 | } | |
1813 | } | |
1814 | ||
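/*
 * The writeback work function: scan from the cold end of the LRU (or
 * from the start of the tree in writeback_all mode), flush uncommitted
 * entries, extend each block with contiguous committed successors and
 * skip blocks whose previous copy is still in flight, then hand the
 * collected list to the pmem or SSD writer under a block plug.
 */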
1815 | static void writecache_writeback(struct work_struct *work) | |
1816 | { | |
1817 | struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work); | |
1818 | struct blk_plug plug; | |
3f649ab7 | 1819 | struct wc_entry *f, *g, *e = NULL; |
48debafe MP |
1820 | struct rb_node *node, *next_node; |
1821 | struct list_head skipped; | |
1822 | struct writeback_list wbl; | |
1823 | unsigned long n_walked; | |
1824 | ||
293128b1 MP |
1825 | if (!WC_MODE_PMEM(wc)) { |
1826 | /* Wait for any active kcopyd work on behalf of ssd writeback */ | |
1827 | dm_kcopyd_client_flush(wc->dm_kcopyd); | |
1828 | } | |
1829 | ||
48debafe MP |
1830 | wc_lock(wc); |
1831 | restart: | |
1832 | if (writecache_has_error(wc)) { | |
1833 | wc_unlock(wc); | |
1834 | return; | |
1835 | } | |
1836 | ||
1837 | if (unlikely(wc->writeback_all)) { | |
1838 | if (writecache_wait_for_writeback(wc)) | |
1839 | goto restart; | |
1840 | } | |
1841 | ||
1842 | if (wc->overwrote_committed) { | |
1843 | writecache_wait_for_ios(wc, WRITE); | |
1844 | } | |
1845 | ||
1846 | n_walked = 0; | |
1847 | INIT_LIST_HEAD(&skipped); | |
1848 | INIT_LIST_HEAD(&wbl.list); | |
1849 | wbl.size = 0; | |
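/*
 * Collect entries while flushing everything, while the freelist plus
 * the blocks already under writeback are at or below the low
 * watermark, or while the oldest entry has aged past
 * max_age - max_age/MAX_AGE_DIV (slightly early, presumably so the
 * max_age timer never finds an overdue entry).
 */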
1850 | while (!list_empty(&wc->lru) && | |
1851 | (wc->writeback_all || | |
3923d485 MP |
1852 | wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark || |
1853 | (jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >= | |
1854 | wc->max_age - wc->max_age / MAX_AGE_DIV))) { | |
48debafe MP |
1855 | |
1856 | n_walked++; | |
1857 | if (unlikely(n_walked > WRITEBACK_LATENCY) && | |
af4f6cab MP |
1858 | likely(!wc->writeback_all)) { |
1859 | if (likely(!dm_suspended(wc->ti))) | |
1860 | queue_work(wc->writeback_wq, &wc->writeback_work); | |
48debafe MP |
1861 | break; |
1862 | } | |
1863 | ||
5229b489 HY |
1864 | if (unlikely(wc->writeback_all)) { |
1865 | if (unlikely(!e)) { | |
1866 | writecache_flush(wc); | |
1867 | e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node); | |
1868 | } else | |
1869 | e = g; | |
1870 | } else | |
1871 | e = container_of(wc->lru.prev, struct wc_entry, lru); | |
48debafe MP |
1872 | BUG_ON(e->write_in_progress); |
1873 | if (unlikely(!writecache_entry_is_committed(wc, e))) { | |
1874 | writecache_flush(wc); | |
1875 | } | |
1876 | node = rb_prev(&e->rb_node); | |
1877 | if (node) { | |
1878 | f = container_of(node, struct wc_entry, rb_node); | |
1879 | if (unlikely(read_original_sector(wc, f) == | |
1880 | read_original_sector(wc, e))) { | |
1881 | BUG_ON(!f->write_in_progress); | |
8c77f1cb | 1882 | list_move(&e->lru, &skipped); |
48debafe MP |
1883 | cond_resched(); |
1884 | continue; | |
1885 | } | |
1886 | } | |
1887 | wc->writeback_size++; | |
8c77f1cb | 1888 | list_move(&e->lru, &wbl.list); |
48debafe MP |
1889 | wbl.size++; |
1890 | e->write_in_progress = true; | |
1891 | e->wc_list_contiguous = 1; | |
1892 | ||
1893 | f = e; | |
1894 | ||
1895 | while (1) { | |
1896 | next_node = rb_next(&f->rb_node); | |
1897 | if (unlikely(!next_node)) | |
1898 | break; | |
1899 | g = container_of(next_node, struct wc_entry, rb_node); | |
62421b38 HY |
1900 | if (unlikely(read_original_sector(wc, g) == |
1901 | read_original_sector(wc, f))) { | |
48debafe MP |
1902 | f = g; |
1903 | continue; | |
1904 | } | |
1905 | if (read_original_sector(wc, g) != | |
1906 | read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT)) | |
1907 | break; | |
1908 | if (unlikely(g->write_in_progress)) | |
1909 | break; | |
1910 | if (unlikely(!writecache_entry_is_committed(wc, g))) | |
1911 | break; | |
1912 | ||
1913 | if (!WC_MODE_PMEM(wc)) { | |
1914 | if (g != f + 1) | |
1915 | break; | |
1916 | } | |
1917 | ||
1918 | n_walked++; | |
1919 | //if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all)) | |
1920 | // break; | |
1921 | ||
1922 | wc->writeback_size++; | |
8c77f1cb | 1923 | list_move(&g->lru, &wbl.list); |
48debafe MP |
1924 | wbl.size++; |
1925 | g->write_in_progress = true; | |
a8affc03 | 1926 | g->wc_list_contiguous = BIO_MAX_VECS; |
48debafe MP |
1927 | f = g; |
1928 | e->wc_list_contiguous++; | |
a8affc03 | 1929 | if (unlikely(e->wc_list_contiguous == BIO_MAX_VECS)) { |
5229b489 HY |
1930 | if (unlikely(wc->writeback_all)) { |
1931 | next_node = rb_next(&f->rb_node); | |
1932 | if (likely(next_node)) | |
1933 | g = container_of(next_node, struct wc_entry, rb_node); | |
1934 | } | |
48debafe | 1935 | break; |
5229b489 | 1936 | } |
48debafe MP |
1937 | } |
1938 | cond_resched(); | |
1939 | } | |
1940 | ||
1941 | if (!list_empty(&skipped)) { | |
1942 | list_splice_tail(&skipped, &wc->lru); | |
1943 | /* | |
1944 | * If we didn't make any progress, we must wait until some | |
1945 | * writeback finishes to avoid burning CPU in a loop | |
1946 | */ | |
1947 | if (unlikely(!wbl.size)) | |
1948 | writecache_wait_for_writeback(wc); | |
1949 | } | |
1950 | ||
1951 | wc_unlock(wc); | |
1952 | ||
1953 | blk_start_plug(&plug); | |
1954 | ||
1955 | if (WC_MODE_PMEM(wc)) | |
1956 | __writecache_writeback_pmem(wc, &wbl); | |
1957 | else | |
1958 | __writecache_writeback_ssd(wc, &wbl); | |
1959 | ||
1960 | blk_finish_plug(&plug); | |
1961 | ||
1962 | if (unlikely(wc->writeback_all)) { | |
1963 | wc_lock(wc); | |
1964 | while (writecache_wait_for_writeback(wc)); | |
1965 | wc_unlock(wc); | |
1966 | } | |
1967 | } | |
1968 | ||
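/*
 * Split the cache device into a metadata area (superblock plus one
 * wc_memory_entry per block, rounded up to a block boundary) and the
 * data blocks themselves. Start from the optimistic estimate
 * device_size / (block_size + sizeof(struct wc_memory_entry)) and
 * decrement n_blocks until both areas fit.
 *
 * Worked example (illustrative): a 1 GiB device with 4096-byte blocks
 * gives n_blocks = 1073741824 / 4112 = 261123; the metadata takes
 * 64 + 16 * 261123 = 4178032 bytes, rounded up to 1021 blocks
 * (4182016 bytes), and 4182016 + 261123 * 4096 is exactly 1 GiB, so
 * the first estimate already fits.
 */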
1969 | static int calculate_memory_size(uint64_t device_size, unsigned block_size, | |
1970 | size_t *n_blocks_p, size_t *n_metadata_blocks_p) | |
1971 | { | |
1972 | uint64_t n_blocks, offset; | |
1973 | struct wc_entry e; | |
1974 | ||
1975 | n_blocks = device_size; | |
1976 | do_div(n_blocks, block_size + sizeof(struct wc_memory_entry)); | |
1977 | ||
1978 | while (1) { | |
1979 | if (!n_blocks) | |
1980 | return -ENOSPC; | |
1981 | /* Verify that the following entries[n_blocks] access won't overflow */ | |
1982 | if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) / | |
1983 | sizeof(struct wc_memory_entry))) | |
1984 | return -EFBIG; | |
1985 | offset = offsetof(struct wc_memory_superblock, entries[n_blocks]); | |
1986 | offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1); | |
1987 | if (offset + n_blocks * block_size <= device_size) | |
1988 | break; | |
1989 | n_blocks--; | |
1990 | } | |
1991 | ||
1992 | /* check that n_blocks fits in the index bit field */ | |
1993 | e.index = n_blocks; | |
1994 | if (e.index != n_blocks) | |
1995 | return -EFBIG; | |
1996 | ||
1997 | if (n_blocks_p) | |
1998 | *n_blocks_p = n_blocks; | |
1999 | if (n_metadata_blocks_p) | |
2000 | *n_metadata_blocks_p = offset >> __ffs(block_size); | |
2001 | return 0; | |
2002 | } | |
2003 | ||
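/*
 * Format a fresh cache: write a superblock with zeroed magic, mark all
 * entries free (original_sector and seq_count of -1) and commit; only
 * then write the magic, so that an interrupted format is not mistaken
 * for a valid cache.
 */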
2004 | static int init_memory(struct dm_writecache *wc) | |
2005 | { | |
2006 | size_t b; | |
2007 | int r; | |
2008 | ||
2009 | r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL); | |
2010 | if (r) | |
2011 | return r; | |
2012 | ||
2013 | r = writecache_alloc_entries(wc); | |
2014 | if (r) | |
2015 | return r; | |
2016 | ||
2017 | for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++) | |
2018 | pmem_assign(sb(wc)->padding[b], cpu_to_le64(0)); | |
2019 | pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION)); | |
2020 | pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size)); | |
2021 | pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks)); | |
2022 | pmem_assign(sb(wc)->seq_count, cpu_to_le64(0)); | |
2023 | ||
1edaa447 | 2024 | for (b = 0; b < wc->n_blocks; b++) { |
48debafe | 2025 | write_original_sector_seq_count(wc, &wc->entries[b], -1, -1); |
1edaa447 MP |
2026 | cond_resched(); |
2027 | } | |
48debafe MP |
2028 | |
2029 | writecache_flush_all_metadata(wc); | |
aa950920 | 2030 | writecache_commit_flushed(wc, false); |
48debafe MP |
2031 | pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC)); |
2032 | writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic); | |
aa950920 | 2033 | writecache_commit_flushed(wc, false); |
48debafe MP |
2034 | |
2035 | return 0; | |
2036 | } | |
2037 | ||
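/*
 * The destructor also serves as the constructor's error path, which is
 * why every resource is checked before being released.
 */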
2038 | static void writecache_dtr(struct dm_target *ti) | |
2039 | { | |
2040 | struct dm_writecache *wc = ti->private; | |
2041 | ||
2042 | if (!wc) | |
2043 | return; | |
2044 | ||
2045 | if (wc->endio_thread) | |
2046 | kthread_stop(wc->endio_thread); | |
2047 | ||
2048 | if (wc->flush_thread) | |
2049 | kthread_stop(wc->flush_thread); | |
2050 | ||
2051 | bioset_exit(&wc->bio_set); | |
2052 | ||
2053 | mempool_exit(&wc->copy_pool); | |
2054 | ||
2055 | if (wc->writeback_wq) | |
2056 | destroy_workqueue(wc->writeback_wq); | |
2057 | ||
2058 | if (wc->dev) | |
2059 | dm_put_device(ti, wc->dev); | |
2060 | ||
2061 | if (wc->ssd_dev) | |
2062 | dm_put_device(ti, wc->ssd_dev); | |
2063 | ||
21ec672e | 2064 | vfree(wc->entries); |
48debafe MP |
2065 | |
2066 | if (wc->memory_map) { | |
2067 | if (WC_MODE_PMEM(wc)) | |
2068 | persistent_memory_release(wc); | |
2069 | else | |
2070 | vfree(wc->memory_map); | |
2071 | } | |
2072 | ||
2073 | if (wc->dm_kcopyd) | |
2074 | dm_kcopyd_client_destroy(wc->dm_kcopyd); | |
2075 | ||
2076 | if (wc->dm_io) | |
2077 | dm_io_client_destroy(wc->dm_io); | |
2078 | ||
21ec672e | 2079 | vfree(wc->dirty_bitmap); |
48debafe MP |
2080 | |
2081 | kfree(wc); | |
2082 | } | |
2083 | ||
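/*
 * Constructor. The table line is
 * writecache <p|s> <origin dev> <cache dev> <block size>
 *	<#feature args> [<feature args>...]
 * (see Documentation/admin-guide/device-mapper/writecache.rst).
 */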
2084 | static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) | |
2085 | { | |
2086 | struct dm_writecache *wc; | |
2087 | struct dm_arg_set as; | |
2088 | const char *string; | |
2089 | unsigned opt_params; | |
2090 | size_t offset, data_size; | |
2091 | int i, r; | |
2092 | char dummy; | |
2093 | int high_wm_percent = HIGH_WATERMARK; | |
2094 | int low_wm_percent = LOW_WATERMARK; | |
2095 | uint64_t x; | |
2096 | struct wc_memory_superblock s; | |
2097 | ||
2098 | static struct dm_arg _args[] = { | |
611c3e16 | 2099 | {0, 17, "Invalid number of feature args"}, |
48debafe MP |
2100 | }; |
2101 | ||
2102 | as.argc = argc; | |
2103 | as.argv = argv; | |
2104 | ||
2105 | wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL); | |
2106 | if (!wc) { | |
2107 | ti->error = "Cannot allocate writecache structure"; | |
2108 | r = -ENOMEM; | |
2109 | goto bad; | |
2110 | } | |
2111 | ti->private = wc; | |
2112 | wc->ti = ti; | |
2113 | ||
2114 | mutex_init(&wc->lock); | |
3923d485 | 2115 | wc->max_age = MAX_AGE_UNSPECIFIED; |
48debafe MP |
2116 | writecache_poison_lists(wc); |
2117 | init_waitqueue_head(&wc->freelist_wait); | |
2118 | timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0); | |
3923d485 | 2119 | timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0); |
48debafe MP |
2120 | |
2121 | for (i = 0; i < 2; i++) { | |
2122 | atomic_set(&wc->bio_in_progress[i], 0); | |
2123 | init_waitqueue_head(&wc->bio_in_progress_wait[i]); | |
2124 | } | |
2125 | ||
2126 | wc->dm_io = dm_io_client_create(); | |
2127 | if (IS_ERR(wc->dm_io)) { | |
2128 | r = PTR_ERR(wc->dm_io); | |
2129 | ti->error = "Unable to allocate dm-io client"; | |
2130 | wc->dm_io = NULL; | |
2131 | goto bad; | |
2132 | } | |
2133 | ||
f87e033b | 2134 | wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1); |
48debafe MP |
2135 | if (!wc->writeback_wq) { |
2136 | r = -ENOMEM; | |
2137 | ti->error = "Could not allocate writeback workqueue"; | |
2138 | goto bad; | |
2139 | } | |
2140 | INIT_WORK(&wc->writeback_work, writecache_writeback); | |
2141 | INIT_WORK(&wc->flush_work, writecache_flush_work); | |
2142 | ||
2143 | raw_spin_lock_init(&wc->endio_list_lock); | |
2144 | INIT_LIST_HEAD(&wc->endio_list); | |
2145 | wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio"); | |
2146 | if (IS_ERR(wc->endio_thread)) { | |
2147 | r = PTR_ERR(wc->endio_thread); | |
2148 | wc->endio_thread = NULL; | |
2149 | ti->error = "Couldn't spawn endio thread"; | |
2150 | goto bad; | |
2151 | } | |
2152 | wake_up_process(wc->endio_thread); | |
2153 | ||
2154 | /* | |
2155 | * Parse the mode (pmem or ssd) | |
2156 | */ | |
2157 | string = dm_shift_arg(&as); | |
2158 | if (!string) | |
2159 | goto bad_arguments; | |
2160 | ||
2161 | if (!strcasecmp(string, "s")) { | |
2162 | wc->pmem_mode = false; | |
2163 | } else if (!strcasecmp(string, "p")) { | |
2164 | #ifdef DM_WRITECACHE_HAS_PMEM | |
2165 | wc->pmem_mode = true; | |
2166 | wc->writeback_fua = true; | |
2167 | #else | |
2168 | /* | |
2169 | * If the architecture doesn't support persistent memory or | |
2170 | * the kernel doesn't support any DAX drivers, this driver can | |
2171 | * only be used in SSD-only mode. | |
2172 | */ | |
2173 | r = -EOPNOTSUPP; | |
2174 | ti->error = "Persistent memory or DAX not supported on this system"; | |
2175 | goto bad; | |
2176 | #endif | |
2177 | } else { | |
2178 | goto bad_arguments; | |
2179 | } | |
2180 | ||
2181 | if (WC_MODE_PMEM(wc)) { | |
2182 | r = bioset_init(&wc->bio_set, BIO_POOL_SIZE, | |
2183 | offsetof(struct writeback_struct, bio), | |
2184 | BIOSET_NEED_BVECS); | |
2185 | if (r) { | |
2186 | ti->error = "Could not allocate bio set"; | |
2187 | goto bad; | |
2188 | } | |
2189 | } else { | |
2190 | r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct)); | |
2191 | if (r) { | |
2192 | ti->error = "Could not allocate mempool"; | |
2193 | goto bad; | |
2194 | } | |
2195 | } | |
2196 | ||
2197 | /* | |
2198 | * Parse the origin data device | |
2199 | */ | |
2200 | string = dm_shift_arg(&as); | |
2201 | if (!string) | |
2202 | goto bad_arguments; | |
2203 | r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev); | |
2204 | if (r) { | |
2205 | ti->error = "Origin data device lookup failed"; | |
2206 | goto bad; | |
2207 | } | |
2208 | ||
2209 | /* | |
2210 | * Parse cache data device (be it pmem or ssd) | |
2211 | */ | |
2212 | string = dm_shift_arg(&as); | |
2213 | if (!string) | |
2214 | goto bad_arguments; | |
2215 | ||
2216 | r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev); | |
2217 | if (r) { | |
2218 | ti->error = "Cache data device lookup failed"; | |
2219 | goto bad; | |
2220 | } | |
2221 | wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode); | |
2222 | ||
48debafe MP |
2223 | /* |
2224 | * Parse the cache block size | |
2225 | */ | |
2226 | string = dm_shift_arg(&as); | |
2227 | if (!string) | |
2228 | goto bad_arguments; | |
2229 | if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 || | |
2230 | wc->block_size < 512 || wc->block_size > PAGE_SIZE || | |
2231 | (wc->block_size & (wc->block_size - 1))) { | |
2232 | r = -EINVAL; | |
2233 | ti->error = "Invalid block size"; | |
2234 | goto bad; | |
2235 | } | |
31b22120 MP |
2236 | if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) || |
2237 | wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) { | |
2238 | r = -EINVAL; | |
2239 | ti->error = "Block size is smaller than device logical block size"; | |
2240 | goto bad; | |
2241 | } | |
48debafe MP |
2242 | wc->block_size_bits = __ffs(wc->block_size); |
2243 | ||
2244 | wc->max_writeback_jobs = MAX_WRITEBACK_JOBS; | |
2245 | wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM; | |
2246 | wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC); | |
2247 | ||
2248 | /* | |
2249 | * Parse optional arguments | |
2250 | */ | |
2251 | r = dm_read_arg_group(_args, &as, &opt_params, &ti->error); | |
2252 | if (r) | |
2253 | goto bad; | |
2254 | ||
2255 | while (opt_params) { | |
2256 | string = dm_shift_arg(&as), opt_params--; | |
d284f824 MP |
2257 | if (!strcasecmp(string, "start_sector") && opt_params >= 1) { |
2258 | unsigned long long start_sector; | |
2259 | string = dm_shift_arg(&as), opt_params--; | |
2260 | if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1) | |
2261 | goto invalid_optional; | |
2262 | wc->start_sector = start_sector; | |
054bee16 | 2263 | wc->start_sector_set = true; |
d284f824 MP |
2264 | if (wc->start_sector != start_sector || |
2265 | wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT) | |
2266 | goto invalid_optional; | |
2267 | } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) { | |
48debafe MP |
2268 | string = dm_shift_arg(&as), opt_params--; |
2269 | if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1) | |
2270 | goto invalid_optional; | |
2271 | if (high_wm_percent < 0 || high_wm_percent > 100) | |
2272 | goto invalid_optional; | |
054bee16 | 2273 | wc->high_wm_percent_value = high_wm_percent; |
48debafe MP |
2274 | wc->high_wm_percent_set = true; |
2275 | } else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) { | |
2276 | string = dm_shift_arg(&as), opt_params--; | |
2277 | if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1) | |
2278 | goto invalid_optional; | |
2279 | if (low_wm_percent < 0 || low_wm_percent > 100) | |
2280 | goto invalid_optional; | |
054bee16 | 2281 | wc->low_wm_percent_value = low_wm_percent; |
48debafe MP |
2282 | wc->low_wm_percent_set = true; |
2283 | } else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) { | |
2284 | string = dm_shift_arg(&as), opt_params--; | |
2285 | if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1) | |
2286 | goto invalid_optional; | |
2287 | wc->max_writeback_jobs_set = true; | |
2288 | } else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) { | |
2289 | string = dm_shift_arg(&as), opt_params--; | |
2290 | if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1) | |
2291 | goto invalid_optional; | |
2292 | wc->autocommit_blocks_set = true; | |
2293 | } else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) { | |
2294 | unsigned autocommit_msecs; | |
2295 | string = dm_shift_arg(&as), opt_params--; | |
2296 | if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1) | |
2297 | goto invalid_optional; | |
2298 | if (autocommit_msecs > 3600000) | |
2299 | goto invalid_optional; | |
2300 | wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs); | |
054bee16 | 2301 | wc->autocommit_time_value = autocommit_msecs; |
48debafe | 2302 | wc->autocommit_time_set = true; |
3923d485 MP |
2303 | } else if (!strcasecmp(string, "max_age") && opt_params >= 1) { |
2304 | unsigned max_age_msecs; | |
2305 | string = dm_shift_arg(&as), opt_params--; | |
2306 | if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1) | |
2307 | goto invalid_optional; | |
2308 | if (max_age_msecs > 86400000) | |
2309 | goto invalid_optional; | |
2310 | wc->max_age = msecs_to_jiffies(max_age_msecs); | |
054bee16 MP |
2311 | wc->max_age_set = true; |
2312 | wc->max_age_value = max_age_msecs; | |
93de44eb | 2313 | } else if (!strcasecmp(string, "cleaner")) { |
054bee16 | 2314 | wc->cleaner_set = true; |
93de44eb | 2315 | wc->cleaner = true; |
48debafe MP |
2316 | } else if (!strcasecmp(string, "fua")) { |
2317 | if (WC_MODE_PMEM(wc)) { | |
2318 | wc->writeback_fua = true; | |
2319 | wc->writeback_fua_set = true; | |
2320 | } else goto invalid_optional; | |
2321 | } else if (!strcasecmp(string, "nofua")) { | |
2322 | if (WC_MODE_PMEM(wc)) { | |
2323 | wc->writeback_fua = false; | |
2324 | wc->writeback_fua_set = true; | |
2325 | } else goto invalid_optional; | |
611c3e16 MP |
2326 | } else if (!strcasecmp(string, "metadata_only")) { |
2327 | wc->metadata_only = true; | |
48debafe MP |
2328 | } else { |
2329 | invalid_optional: | |
2330 | r = -EINVAL; | |
2331 | ti->error = "Invalid optional argument"; | |
2332 | goto bad; | |
2333 | } | |
2334 | } | |
2335 | ||
2336 | if (high_wm_percent < low_wm_percent) { | |
2337 | r = -EINVAL; | |
2338 | ti->error = "High watermark must be greater than or equal to low watermark"; | |
2339 | goto bad; | |
2340 | } | |
2341 | ||
d284f824 | 2342 | if (WC_MODE_PMEM(wc)) { |
a4662458 MS |
2343 | if (!dax_synchronous(wc->ssd_dev->dax_dev)) { |
2344 | r = -EOPNOTSUPP; | |
2345 | ti->error = "Asynchronous persistent memory not supported as pmem cache"; | |
2346 | goto bad; | |
2347 | } | |
2348 | ||
d284f824 MP |
2349 | r = persistent_memory_claim(wc); |
2350 | if (r) { | |
2351 | ti->error = "Unable to map persistent memory for cache"; | |
2352 | goto bad; | |
2353 | } | |
2354 | } else { | |
48debafe MP |
2355 | size_t n_blocks, n_metadata_blocks; |
2356 | uint64_t n_bitmap_bits; | |
2357 | ||
d284f824 MP |
2358 | wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT; |
2359 | ||
48debafe MP |
2360 | bio_list_init(&wc->flush_list); |
2361 | wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush"); | |
2362 | if (IS_ERR(wc->flush_thread)) { | |
2363 | r = PTR_ERR(wc->flush_thread); | |
2364 | wc->flush_thread = NULL; | |
e8ea141a | 2365 | ti->error = "Couldn't spawn flush thread"; |
48debafe MP |
2366 | goto bad; |
2367 | } | |
2368 | wake_up_process(wc->flush_thread); | |
2369 | ||
2370 | r = calculate_memory_size(wc->memory_map_size, wc->block_size, | |
2371 | &n_blocks, &n_metadata_blocks); | |
2372 | if (r) { | |
2373 | ti->error = "Invalid device size"; | |
2374 | goto bad; | |
2375 | } | |
2376 | ||
2377 | n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) + | |
2378 | BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY; | |
2379 | /* this is a limitation of the test_bit functions */ | |
2380 | if (n_bitmap_bits > 1U << 31) { | |
2381 | r = -EFBIG; | |
2382 | ti->error = "Invalid device size"; | |
2383 | goto bad; | |
2384 | } | |
2385 | ||
2386 | wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits); | |
2387 | if (!wc->memory_map) { | |
2388 | r = -ENOMEM; | |
2389 | ti->error = "Unable to allocate memory for metadata"; | |
2390 | goto bad; | |
2391 | } | |
2392 | ||
2393 | wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle); | |
2394 | if (IS_ERR(wc->dm_kcopyd)) { | |
2395 | r = PTR_ERR(wc->dm_kcopyd); | |
2396 | ti->error = "Unable to allocate dm-kcopyd client"; | |
2397 | wc->dm_kcopyd = NULL; | |
2398 | goto bad; | |
2399 | } | |
2400 | ||
2401 | wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT); | |
2402 | wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) / | |
2403 | BITS_PER_LONG * sizeof(unsigned long); | |
2404 | wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size); | |
2405 | if (!wc->dirty_bitmap) { | |
2406 | r = -ENOMEM; | |
2407 | ti->error = "Unable to allocate dirty bitmap"; | |
2408 | goto bad; | |
2409 | } | |
2410 | ||
31b22120 | 2411 | r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT); |
48debafe | 2412 | if (r) { |
31b22120 | 2413 | ti->error = "Unable to read first block of metadata"; |
48debafe MP |
2414 | goto bad; |
2415 | } | |
2416 | } | |
2417 | ||
ec6347bb | 2418 | r = copy_mc_to_kernel(&s, sb(wc), sizeof(struct wc_memory_superblock)); |
48debafe MP |
2419 | if (r) { |
2420 | ti->error = "Hardware memory error when reading superblock"; | |
2421 | goto bad; | |
2422 | } | |
2423 | if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) { | |
2424 | r = init_memory(wc); | |
2425 | if (r) { | |
2426 | ti->error = "Unable to initialize device"; | |
2427 | goto bad; | |
2428 | } | |
ec6347bb DW |
2429 | r = copy_mc_to_kernel(&s, sb(wc), |
2430 | sizeof(struct wc_memory_superblock)); | |
48debafe MP |
2431 | if (r) { |
2432 | ti->error = "Hardware memory error when reading superblock"; | |
2433 | goto bad; | |
2434 | } | |
2435 | } | |
2436 | ||
2437 | if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) { | |
2438 | ti->error = "Invalid magic in the superblock"; | |
2439 | r = -EINVAL; | |
2440 | goto bad; | |
2441 | } | |
2442 | ||
2443 | if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) { | |
2444 | ti->error = "Invalid version in the superblock"; | |
2445 | r = -EINVAL; | |
2446 | goto bad; | |
2447 | } | |
2448 | ||
2449 | if (le32_to_cpu(s.block_size) != wc->block_size) { | |
2450 | ti->error = "Block size does not match superblock"; | |
2451 | r = -EINVAL; | |
2452 | goto bad; | |
2453 | } | |
2454 | ||
2455 | wc->n_blocks = le64_to_cpu(s.n_blocks); | |
2456 | ||
2457 | offset = wc->n_blocks * sizeof(struct wc_memory_entry); | |
2458 | if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) { | |
2459 | overflow: | |
2460 | ti->error = "Overflow in size calculation"; | |
2461 | r = -EINVAL; | |
2462 | goto bad; | |
2463 | } | |
2464 | offset += sizeof(struct wc_memory_superblock); | |
2465 | if (offset < sizeof(struct wc_memory_superblock)) | |
2466 | goto overflow; | |
2467 | offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1); | |
2468 | data_size = wc->n_blocks * (size_t)wc->block_size; | |
2469 | if (!offset || (data_size / wc->block_size != wc->n_blocks) || | |
2470 | (offset + data_size < offset)) | |
2471 | goto overflow; | |
2472 | if (offset + data_size > wc->memory_map_size) { | |
2473 | ti->error = "Memory area is too small"; | |
2474 | r = -EINVAL; | |
2475 | goto bad; | |
2476 | } | |
2477 | ||
2478 | wc->metadata_sectors = offset >> SECTOR_SHIFT; | |
2479 | wc->block_start = (char *)sb(wc) + offset; | |
2480 | ||
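/*
 * Convert the occupancy percentages into free-block counts: with a
 * high watermark of N% occupancy, writeback starts once no more than
 * (100 - N)% of the blocks remain free. The +50 rounds to the nearest
 * block.
 */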
2481 | x = (uint64_t)wc->n_blocks * (100 - high_wm_percent); | |
2482 | x += 50; | |
2483 | do_div(x, 100); | |
2484 | wc->freelist_high_watermark = x; | |
2485 | x = (uint64_t)wc->n_blocks * (100 - low_wm_percent); | |
2486 | x += 50; | |
2487 | do_div(x, 100); | |
2488 | wc->freelist_low_watermark = x; | |
2489 | ||
93de44eb MP |
2490 | if (wc->cleaner) |
2491 | activate_cleaner(wc); | |
2492 | ||
48debafe MP |
2493 | r = writecache_alloc_entries(wc); |
2494 | if (r) { | |
2495 | ti->error = "Cannot allocate memory"; | |
2496 | goto bad; | |
2497 | } | |
2498 | ||
ee55b92a | 2499 | ti->num_flush_bios = WC_MODE_PMEM(wc) ? 1 : 2; |
48debafe MP |
2500 | ti->flush_supported = true; |
2501 | ti->num_discard_bios = 1; | |
2502 | ||
2503 | if (WC_MODE_PMEM(wc)) | |
2504 | persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size); | |
2505 | ||
2506 | return 0; | |
2507 | ||
2508 | bad_arguments: | |
2509 | r = -EINVAL; | |
2510 | ti->error = "Bad arguments"; | |
2511 | bad: | |
2512 | writecache_dtr(ti); | |
2513 | return r; | |
2514 | } | |
2515 | ||
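/*
 * STATUSTYPE_INFO reports <error> <total blocks> <free blocks>
 * <blocks under writeback>; STATUSTYPE_TABLE reconstructs the table
 * line, emitting only feature arguments that were explicitly set.
 */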
2516 | static void writecache_status(struct dm_target *ti, status_type_t type, | |
2517 | unsigned status_flags, char *result, unsigned maxlen) | |
2518 | { | |
2519 | struct dm_writecache *wc = ti->private; | |
2520 | unsigned extra_args; | |
2521 | unsigned sz = 0; | |
48debafe MP |
2522 | |
2523 | switch (type) { | |
2524 | case STATUSTYPE_INFO: | |
2525 | DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc), | |
2526 | (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size, | |
2527 | (unsigned long long)wc->writeback_size); | |
2528 | break; | |
2529 | case STATUSTYPE_TABLE: | |
2530 | DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's', | |
2531 | wc->dev->name, wc->ssd_dev->name, wc->block_size); | |
2532 | extra_args = 0; | |
054bee16 | 2533 | if (wc->start_sector_set) |
9ff07e7d | 2534 | extra_args += 2; |
054bee16 | 2535 | if (wc->high_wm_percent_set) |
48debafe | 2536 | extra_args += 2; |
054bee16 | 2537 | if (wc->low_wm_percent_set) |
48debafe MP |
2538 | extra_args += 2; |
2539 | if (wc->max_writeback_jobs_set) | |
2540 | extra_args += 2; | |
2541 | if (wc->autocommit_blocks_set) | |
2542 | extra_args += 2; | |
2543 | if (wc->autocommit_time_set) | |
2544 | extra_args += 2; | |
054bee16 | 2545 | if (wc->max_age_set) |
e5d41cbc | 2546 | extra_args += 2; |
054bee16 | 2547 | if (wc->cleaner_set) |
93de44eb | 2548 | extra_args++; |
48debafe MP |
2549 | if (wc->writeback_fua_set) |
2550 | extra_args++; | |
611c3e16 MP |
2551 | if (wc->metadata_only) |
2552 | extra_args++; | |
48debafe MP |
2553 | |
2554 | DMEMIT("%u", extra_args); | |
054bee16 | 2555 | if (wc->start_sector_set) |
9ff07e7d | 2556 | DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector); |
054bee16 MP |
2557 | if (wc->high_wm_percent_set) |
2558 | DMEMIT(" high_watermark %u", wc->high_wm_percent_value); | |
2559 | if (wc->low_wm_percent_set) | |
2560 | DMEMIT(" low_watermark %u", wc->low_wm_percent_value); | |
48debafe MP |
2561 | if (wc->max_writeback_jobs_set) |
2562 | DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs); | |
2563 | if (wc->autocommit_blocks_set) | |
2564 | DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks); | |
2565 | if (wc->autocommit_time_set) | |
054bee16 MP |
2566 | DMEMIT(" autocommit_time %u", wc->autocommit_time_value); |
2567 | if (wc->max_age_set) | |
2568 | DMEMIT(" max_age %u", wc->max_age_value); | |
2569 | if (wc->cleaner_set) | |
93de44eb | 2570 | DMEMIT(" cleaner"); |
48debafe MP |
2571 | if (wc->writeback_fua_set) |
2572 | DMEMIT(" %sfua", wc->writeback_fua ? "" : "no"); | |
611c3e16 MP |
2573 | if (wc->metadata_only) |
2574 | DMEMIT(" metadata_only"); | |
48debafe MP |
2575 | break; |
2576 | } | |
2577 | } | |
2578 | ||
2579 | static struct target_type writecache_target = { | |
2580 | .name = "writecache", | |
611c3e16 | 2581 | .version = {1, 5, 0}, |
48debafe MP |
2582 | .module = THIS_MODULE, |
2583 | .ctr = writecache_ctr, | |
2584 | .dtr = writecache_dtr, | |
2585 | .status = writecache_status, | |
2586 | .postsuspend = writecache_suspend, | |
2587 | .resume = writecache_resume, | |
2588 | .message = writecache_message, | |
2589 | .map = writecache_map, | |
2590 | .end_io = writecache_end_io, | |
2591 | .iterate_devices = writecache_iterate_devices, | |
2592 | .io_hints = writecache_io_hints, | |
2593 | }; | |
2594 | ||
2595 | static int __init dm_writecache_init(void) | |
2596 | { | |
2597 | int r; | |
2598 | ||
2599 | r = dm_register_target(&writecache_target); | |
2600 | if (r < 0) { | |
2601 | DMERR("register failed %d", r); | |
2602 | return r; | |
2603 | } | |
2604 | ||
2605 | return 0; | |
2606 | } | |
2607 | ||
2608 | static void __exit dm_writecache_exit(void) | |
2609 | { | |
2610 | dm_unregister_target(&writecache_target); | |
2611 | } | |
2612 | ||
2613 | module_init(dm_writecache_init); | |
2614 | module_exit(dm_writecache_exit); | |
2615 | ||
2616 | MODULE_DESCRIPTION(DM_NAME " writecache target"); | |
2617 | MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>"); | |
2618 | MODULE_LICENSE("GPL"); |