dm integrity: allow large ranges to be described
[linux-2.6-block.git] drivers/md/dm-integrity.c
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS 32768
#define DEFAULT_JOURNAL_SIZE_FACTOR 7
#define DEFAULT_BUFFER_SECTORS 128
#define DEFAULT_JOURNAL_WATERMARK 50
#define DEFAULT_SYNC_MSEC 10000
#define DEFAULT_MAX_JOURNAL_SECTORS 131072
#define MIN_LOG2_INTERLEAVE_SECTORS 3
#define MAX_LOG2_INTERLEAVE_SECTORS 31
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
#define RECALC_SECTORS 8192
#define RECALC_WRITE_SUPER 16

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC "integrt"
#define SB_VERSION_1 1
#define SB_VERSION_2 2
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
	__u8 pad[3];
	__u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2

#define JOURNAL_ENTRY_ROUNDUP 8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR 8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[0];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

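/*
 * A journal entry is published by storing its sector number last: the
 * smp_wmb() orders the entry payload before the sector store, and
 * sector_hi doubles as a state marker (-1 = unused, -2 = in progress).
 */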
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#else
#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

#define JOURNAL_BLOCK_SECTORS 8
#define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS 8

#define N_COMMIT_IDS 4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;

	unsigned char mode;
	int suspending;

	int failed;

	struct crypto_shash *internal_hash;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;
	u8 *recalc_buffer;
	u8 *recalc_tags;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool just_formatted;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;
};

struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool write;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct gendisk *orig_bi_disk;
	u8 orig_bi_partno;
	bio_end_io_t *orig_bi_end_io;
	struct bio_integrity_payload *orig_bi_integrity;
	struct bvec_iter orig_bi_iter;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL 32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...) __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...) do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
#endif

/*
 * DM Integrity profile, protection is performed by the layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name		= "DM-DIF-EXT-TAG",
	.generate_fn	= NULL,
	.verify_fn	= NULL,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with section and sector, so that if a piece of
	 * journal is written at a wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

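/*
 * Map a data sector to its (area, offset) pair.  Without a separate
 * metadata device the layout is interleaved: each area holds
 * 2^log2_interleave_sectors data sectors and is preceded by its own
 * metadata run.  With a metadata device there is a single area.
 */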
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

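/*
 * Return the dm-bufio block (in units of 2^log2_buffer_sectors sectors)
 * that holds the tag for the given area/offset, and store the byte
 * offset of the tag within that block in *metadata_offset.
 */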
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	return dm_io(&io_req, 1, &io_loc, NULL);
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;
	desc->flags = 0;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

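/*
 * "Encrypt" or "decrypt" the journal by XORing it, page by page, with a
 * precomputed keystream (ic->journal_xor) using the async XOR engine.
 * When the walk reaches the page where a new section starts, that
 * section's MAC is written out before its pages are XORed.
 */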
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

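/*
 * Submit one skcipher request.  Returns true if the request is being
 * processed asynchronously and its callback will signal completion;
 * -EBUSY means the crypto backlog is full, so wait for the backoff
 * completion before continuing.
 */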
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	unsigned sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

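/*
 * Write the in-memory journal sections [commit_start, commit_start +
 * commit_sections) to disk with FUA, encrypting them (or computing
 * section MACs) first.  A range that wraps past the end of the journal
 * is split into two writes whose encryption can proceed in parallel.
 */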
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

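/*
 * Try to insert new_range into the tree of in-flight ranges.  Returns
 * false if it overlaps a range that is already in progress (or, when
 * check_waiting is set, one that is queued waiting); the caller must
 * then wait and retry.
 */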
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;
		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;
		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND (-1U)

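/*
 * Look up the journal entry for the given sector.  If the sector was
 * written more than once, the most recently added node (the rightmost
 * duplicate) wins.  *next_sector is set to the smallest journal sector
 * greater than the requested one, so reads can be clipped to it.
 */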
static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

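/*
 * Read, write or compare tag bytes in the metadata area, walking the
 * buffers via dm-bufio and advancing *metadata_block/*metadata_offset.
 * For TAG_CMP, a mismatch returns the number of tag bytes remaining
 * from the first differing byte (the caller uses this to locate the
 * failing sector); a full match returns 0.
 */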
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
		} else {
			/* e.g.: op == TAG_CMP */
			if (unlikely(memcmp(dp, tag, to_copy))) {
				unsigned i;

				for (i = 0; i < to_copy; i++) {
					if (dp[i] != tag[i])
						break;
					total_size--;
				}
				dm_bufio_release(b);
				return total_size;
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}
		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (unlikely(dio->write))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	bio->bi_iter = dio->orig_bi_iter;
	bio->bi_disk = dio->orig_bi_disk;
	bio->bi_partno = dio->orig_bi_partno;
	if (dio->orig_bi_integrity) {
		bio->bi_integrity = dio->orig_bi_integrity;
		bio->bi_opf |= REQ_INTEGRITY;
	}
	bio->bi_end_io = dio->orig_bi_end_io;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;
	req->flags = 0;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}

static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[HASH_MAX_DIGESTSIZE];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					DMERR_LIMIT("Checksum failed at sector 0x%llx",
						    (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
					atomic64_inc(&ic->number_of_mismatches);
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->orig_bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}

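/*
 * Validate the bio (bounds, block alignment, integrity payload size),
 * strip REQ_FUA (the disk cache is flushed explicitly after commit),
 * map the logical sector to the data device layout and hand over to
 * dm_integrity_map_continue().
 */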
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->write = bio_op(bio) == REQ_OP_WRITE;
	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
		      (unsigned long long)ic->provided_data_sectors);
		return DM_MAPIO_KILL;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
		return DM_MAPIO_KILL;
	}

	if (ic->sectors_per_block > 1) {
		struct bvec_iter iter;
		struct bio_vec bv;
		bio_for_each_segment(bv, bio, iter) {
			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
				      bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return DM_MAPIO_KILL;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
				return DM_MAPIO_KILL;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return DM_MAPIO_KILL;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
		return DM_MAPIO_KILL;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}

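/*
 * Copy bio data to (write) or from (read) in-memory journal entries.
 * On writes the checksum is computed and the entry is published last by
 * journal_entry_set_sector().  Returns true if the bio has sectors left
 * beyond this range and the caller must retake the lock and continue.
 */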
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->write))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(!dio->write)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
							    (unsigned long long)logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->write))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->write))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->write)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[HASH_MAX_DIGESTSIZE];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(!dio->write))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->write)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}

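/*
 * The main I/O path.  In journal mode, writes allocate journal entries
 * under endio_wait.lock and go through __journal_read_write(); reads
 * that hit the journal are served from it.  Otherwise the bio is
 * redirected to the data device and the tags are checked or written by
 * integrity_metadata().
 */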
1667static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1668{
1669 struct dm_integrity_c *ic = dio->ic;
1670 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1671 unsigned journal_section, journal_entry;
1672 unsigned journal_read_pos;
1673 struct completion read_comp;
1674 bool need_sync_io = ic->internal_hash && !dio->write;
1675
1676 if (need_sync_io && from_map) {
1677 INIT_WORK(&dio->work, integrity_bio_wait);
1678 queue_work(ic->metadata_wq, &dio->work);
1679 return;
1680 }
1681
1682lock_retry:
1683 spin_lock_irq(&ic->endio_wait.lock);
1684retry:
1685 if (unlikely(dm_integrity_failed(ic))) {
1686 spin_unlock_irq(&ic->endio_wait.lock);
1687 do_endio(ic, bio);
1688 return;
1689 }
1690 dio->range.n_sectors = bio_sectors(bio);
1691 journal_read_pos = NOT_FOUND;
1692 if (likely(ic->mode == 'J')) {
1693 if (dio->write) {
1694 unsigned next_entry, i, pos;
9dd59727 1695 unsigned ws, we, range_sectors;
7eada909 1696
9dd59727 1697 dio->range.n_sectors = min(dio->range.n_sectors,
4f43446d 1698 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
518748b1
MP
1699 if (unlikely(!dio->range.n_sectors)) {
1700 if (from_map)
1701 goto offload_to_thread;
1702 sleep_on_endio_wait(ic);
1703 goto retry;
1704 }
9dd59727
MP
1705 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
1706 ic->free_sectors -= range_sectors;
7eada909
MP
1707 journal_section = ic->free_section;
1708 journal_entry = ic->free_section_entry;
1709
9dd59727 1710 next_entry = ic->free_section_entry + range_sectors;
7eada909
MP
1711 ic->free_section_entry = next_entry % ic->journal_section_entries;
1712 ic->free_section += next_entry / ic->journal_section_entries;
1713 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
1714 wraparound_section(ic, &ic->free_section);
1715
1716 pos = journal_section * ic->journal_section_entries + journal_entry;
1717 ws = journal_section;
1718 we = journal_entry;
9d609f85
MP
1719 i = 0;
1720 do {
7eada909
MP
1721 struct journal_entry *je;
1722
1723 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
1724 pos++;
1725 if (unlikely(pos >= ic->journal_entries))
1726 pos = 0;
1727
1728 je = access_journal_entry(ic, ws, we);
1729 BUG_ON(!journal_entry_is_unused(je));
1730 journal_entry_set_inprogress(je);
1731 we++;
1732 if (unlikely(we == ic->journal_section_entries)) {
1733 we = 0;
1734 ws++;
1735 wraparound_section(ic, &ws);
1736 }
9d609f85 1737 } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
7eada909
MP
1738
1739 spin_unlock_irq(&ic->endio_wait.lock);
1740 goto journal_read_write;
1741 } else {
1742 sector_t next_sector;
1743 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1744 if (likely(journal_read_pos == NOT_FOUND)) {
1745 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
1746 dio->range.n_sectors = next_sector - dio->range.logical_sector;
1747 } else {
1748 unsigned i;
9d609f85
MP
1749 unsigned jp = journal_read_pos + 1;
1750 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
1751 if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
7eada909
MP
1752 break;
1753 }
1754 dio->range.n_sectors = i;
1755 }
1756 }
1757 }
724376a0 1758 if (unlikely(!add_new_range(ic, &dio->range, true))) {
7eada909
MP
1759 /*
1760 * We must not sleep in the request routine because it could
1761 * stall bios on current->bio_list.
1762 * So, we offload the bio to a workqueue if we have to sleep.
1763 */
1764 if (from_map) {
1765offload_to_thread:
1766 spin_unlock_irq(&ic->endio_wait.lock);
1767 INIT_WORK(&dio->work, integrity_bio_wait);
1768 queue_work(ic->wait_wq, &dio->work);
1769 return;
1770 }
1771 wait_and_add_new_range(ic, &dio->range);
1772 }
1773 spin_unlock_irq(&ic->endio_wait.lock);
1774
1775 if (unlikely(journal_read_pos != NOT_FOUND)) {
1776 journal_section = journal_read_pos / ic->journal_section_entries;
1777 journal_entry = journal_read_pos % ic->journal_section_entries;
1778 goto journal_read_write;
1779 }
1780
1781 dio->in_flight = (atomic_t)ATOMIC_INIT(2);
1782
1783 if (need_sync_io) {
1784 init_completion(&read_comp);
1785 dio->completion = &read_comp;
1786 } else
1787 dio->completion = NULL;
1788
1789 dio->orig_bi_iter = bio->bi_iter;
1790
1791 dio->orig_bi_disk = bio->bi_disk;
1792 dio->orig_bi_partno = bio->bi_partno;
1793 bio_set_dev(bio, ic->dev->bdev);
1794
1795 dio->orig_bi_integrity = bio_integrity(bio);
1796 bio->bi_integrity = NULL;
1797 bio->bi_opf &= ~REQ_INTEGRITY;
1798
1799 dio->orig_bi_end_io = bio->bi_end_io;
1800 bio->bi_end_io = integrity_end_io;
1801
1802 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
1803 generic_make_request(bio);
1804
1805 if (need_sync_io) {
1806 wait_for_completion_io(&read_comp);
1807 if (unlikely(ic->recalc_wq != NULL) &&
1808 ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
1809 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
1810 goto skip_check;
1811 if (likely(!bio->bi_status))
1812 integrity_metadata(&dio->work);
1813 else
1814skip_check:
1815 dec_in_flight(dio);
1816
1817 } else {
1818 INIT_WORK(&dio->work, integrity_metadata);
1819 queue_work(ic->metadata_wq, &dio->work);
1820 }
1821
1822 return;
1823
1824journal_read_write:
1825 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
1826 goto lock_retry;
1827
1828 do_endio_flush(ic, dio);
1829}
1830
1831
1832static void integrity_bio_wait(struct work_struct *w)
1833{
1834 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1835
1836 dm_integrity_map_continue(dio, false);
1837}
1838
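/*
 * Discard the unused entries of a partially filled journal section and
 * advance to the next one, so that commit always handles whole sections.
 */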
1839static void pad_uncommitted(struct dm_integrity_c *ic)
1840{
1841 if (ic->free_section_entry) {
1842 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
1843 ic->free_section_entry = 0;
1844 ic->free_section++;
1845 wraparound_section(ic, &ic->free_section);
1846 ic->n_uncommitted_sections++;
1847 }
1848 WARN_ON(ic->journal_sections * ic->journal_section_entries !=
1849 (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
1850}
1851
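/*
 * Commit work: wait until all entries in the uncommitted sections have
 * been copied into the journal, stamp every journal sector with the
 * current commit id, write the journal out and move those sections to
 * the committed window; the writer is kicked when free journal space
 * falls below the watermark.
 */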
1852static void integrity_commit(struct work_struct *w)
1853{
1854 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
1855 unsigned commit_start, commit_sections;
1856 unsigned i, j, n;
1857 struct bio *flushes;
1858
1859 del_timer(&ic->autocommit_timer);
1860
1861 spin_lock_irq(&ic->endio_wait.lock);
1862 flushes = bio_list_get(&ic->flush_bio_list);
1863 if (unlikely(ic->mode != 'J')) {
1864 spin_unlock_irq(&ic->endio_wait.lock);
1865 dm_integrity_flush_buffers(ic);
1866 goto release_flush_bios;
1867 }
1868
1869 pad_uncommitted(ic);
1870 commit_start = ic->uncommitted_section;
1871 commit_sections = ic->n_uncommitted_sections;
1872 spin_unlock_irq(&ic->endio_wait.lock);
1873
1874 if (!commit_sections)
1875 goto release_flush_bios;
1876
1877 i = commit_start;
1878 for (n = 0; n < commit_sections; n++) {
1879 for (j = 0; j < ic->journal_section_entries; j++) {
1880 struct journal_entry *je;
1881 je = access_journal_entry(ic, i, j);
1882 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1883 }
1884 for (j = 0; j < ic->journal_section_sectors; j++) {
1885 struct journal_sector *js;
1886 js = access_journal(ic, i, j);
1887 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
1888 }
1889 i++;
1890 if (unlikely(i >= ic->journal_sections))
1891 ic->commit_seq = next_commit_seq(ic->commit_seq);
1892 wraparound_section(ic, &i);
1893 }
1894 smp_rmb();
1895
1896 write_journal(ic, commit_start, commit_sections);
1897
1898 spin_lock_irq(&ic->endio_wait.lock);
1899 ic->uncommitted_section += commit_sections;
1900 wraparound_section(ic, &ic->uncommitted_section);
1901 ic->n_uncommitted_sections -= commit_sections;
1902 ic->n_committed_sections += commit_sections;
1903 spin_unlock_irq(&ic->endio_wait.lock);
1904
1905 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
1906 queue_work(ic->writer_wq, &ic->writer_work);
1907
1908release_flush_bios:
1909 while (flushes) {
1910 struct bio *next = flushes->bi_next;
1911 flushes->bi_next = NULL;
1912 do_endio(ic, flushes);
1913 flushes = next;
1914 }
1915}
1916
1917static void complete_copy_from_journal(unsigned long error, void *context)
1918{
1919 struct journal_io *io = context;
1920 struct journal_completion *comp = io->comp;
1921 struct dm_integrity_c *ic = comp->ic;
1922 remove_range(ic, &io->range);
1923 mempool_free(io, &ic->journal_io_mempool);
1924 if (unlikely(error != 0))
1925 dm_integrity_io_error(ic, "copying from journal", -EIO);
1926 complete_journal_op(comp);
1927}
1928
1929static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
1930 struct journal_entry *je)
1931{
1932 unsigned s = 0;
1933 do {
1934 js->commit_id = je->last_bytes[s];
1935 js++;
1936 } while (++s < ic->sectors_per_block);
1937}
1938
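/*
 * Write committed journal sections back to the device (or replay them
 * after a crash): runs of consecutive sectors are coalesced, entries
 * superseded by a newer committed node are skipped, the tags are written
 * to the metadata area and the data is copied from the journal to its
 * final location.
 */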
1939static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
1940 unsigned write_sections, bool from_replay)
1941{
1942 unsigned i, j, n;
1943 struct journal_completion comp;
1944 struct blk_plug plug;
1945
1946 blk_start_plug(&plug);
1947
1948 comp.ic = ic;
1949 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
1950 init_completion(&comp.comp);
1951
1952 i = write_start;
1953 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
1954#ifndef INTERNAL_VERIFY
1955 if (unlikely(from_replay))
1956#endif
1957 rw_section_mac(ic, i, false);
1958 for (j = 0; j < ic->journal_section_entries; j++) {
1959 struct journal_entry *je = access_journal_entry(ic, i, j);
1960 sector_t sec, area, offset;
1961 unsigned k, l, next_loop;
1962 sector_t metadata_block;
1963 unsigned metadata_offset;
1964 struct journal_io *io;
1965
1966 if (journal_entry_is_unused(je))
1967 continue;
1968 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
1969 sec = journal_entry_get_sector(je);
1970 if (unlikely(from_replay)) {
1971 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
1972 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
1973 sec &= ~(sector_t)(ic->sectors_per_block - 1);
1974 }
1975 }
1976 get_area_and_offset(ic, sec, &area, &offset);
1977 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
1978 for (k = j + 1; k < ic->journal_section_entries; k++) {
1979 struct journal_entry *je2 = access_journal_entry(ic, i, k);
1980 sector_t sec2, area2, offset2;
1981 if (journal_entry_is_unused(je2))
1982 break;
1983 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
1984 sec2 = journal_entry_get_sector(je2);
1985 get_area_and_offset(ic, sec2, &area2, &offset2);
1986 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
1987 break;
1988 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
1989 }
1990 next_loop = k - 1;
1991
1992 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
1993 io->comp = &comp;
1994 io->range.logical_sector = sec;
1995 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
1996
1997 spin_lock_irq(&ic->endio_wait.lock);
1998 if (unlikely(!add_new_range(ic, &io->range, true)))
1999 wait_and_add_new_range(ic, &io->range);
2000
2001 if (likely(!from_replay)) {
2002 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2003
2004 /* don't write if there is a newer committed sector */
2005 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2006 struct journal_entry *je2 = access_journal_entry(ic, i, j);
2007
2008 journal_entry_set_unused(je2);
2009 remove_journal_node(ic, &section_node[j]);
2010 j++;
2011 sec += ic->sectors_per_block;
2012 offset += ic->sectors_per_block;
2013 }
2014 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2015 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2016
2017 journal_entry_set_unused(je2);
2018 remove_journal_node(ic, &section_node[k - 1]);
2019 k--;
2020 }
2021 if (j == k) {
2022 remove_range_unlocked(ic, &io->range);
2023 spin_unlock_irq(&ic->endio_wait.lock);
2024 mempool_free(io, &ic->journal_io_mempool);
2025 goto skip_io;
2026 }
2027 for (l = j; l < k; l++) {
2028 remove_journal_node(ic, &section_node[l]);
2029 }
2030 }
2031 spin_unlock_irq(&ic->endio_wait.lock);
2032
2033 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2034 for (l = j; l < k; l++) {
2035 int r;
2036 struct journal_entry *je2 = access_journal_entry(ic, i, l);
2037
2038 if (
2039#ifndef INTERNAL_VERIFY
2040 unlikely(from_replay) &&
2041#endif
2042 ic->internal_hash) {
2043 char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2044
2045 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2046 (char *)access_journal_data(ic, i, l), test_tag);
2047 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2048 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2049 }
2050
2051 journal_entry_set_unused(je2);
2052 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2053 ic->tag_size, TAG_WRITE);
2054 if (unlikely(r)) {
2055 dm_integrity_io_error(ic, "reading tags", r);
2056 }
2057 }
2058
2059 atomic_inc(&comp.in_flight);
2060 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2061 (k - j) << ic->sb->log2_sectors_per_block,
2062 get_data_sector(ic, area, offset),
7eada909
MP
2063 complete_copy_from_journal, io);
2064skip_io:
2065 j = next_loop;
2066 }
2067 }
2068
2069 dm_bufio_write_dirty_buffers_async(ic->bufio);
2070
2071 blk_finish_plug(&plug);
2072
2073 complete_journal_op(&comp);
2074 wait_for_completion_io(&comp.comp);
2075
2076 dm_integrity_flush_buffers(ic);
2077}
2078
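/* Background writer: flush the committed part of the journal to disk. */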
2079static void integrity_writer(struct work_struct *w)
2080{
2081 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2082 unsigned write_start, write_sections;
2083
2084 unsigned prev_free_sectors;
2085
2086 /* the following test is not needed, but it tests the replay code */
2087 if (READ_ONCE(ic->suspending) && !ic->meta_dev)
2088 return;
2089
2090 spin_lock_irq(&ic->endio_wait.lock);
2091 write_start = ic->committed_section;
2092 write_sections = ic->n_committed_sections;
2093 spin_unlock_irq(&ic->endio_wait.lock);
2094
2095 if (!write_sections)
2096 return;
2097
2098 do_journal_write(ic, write_start, write_sections, false);
2099
2100 spin_lock_irq(&ic->endio_wait.lock);
2101
2102 ic->committed_section += write_sections;
2103 wraparound_section(ic, &ic->committed_section);
2104 ic->n_committed_sections -= write_sections;
2105
2106 prev_free_sectors = ic->free_sectors;
2107 ic->free_sectors += write_sections * ic->journal_section_entries;
2108 if (unlikely(!prev_free_sectors))
2109 wake_up_locked(&ic->endio_wait);
2110
2111 spin_unlock_irq(&ic->endio_wait.lock);
2112}
2113
2114static void recalc_write_super(struct dm_integrity_c *ic)
2115{
2116 int r;
2117
2118 dm_integrity_flush_buffers(ic);
2119 if (dm_integrity_failed(ic))
2120 return;
2121
2122 sb_set_version(ic);
2123 r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2124 if (unlikely(r))
2125 dm_integrity_io_error(ic, "writing superblock", r);
2126}
2127
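/*
 * Background recalculation: walk the device in RECALC_SECTORS chunks,
 * read the data, compute the checksums, write the tags and advance
 * sb->recalc_sector; the superblock is persisted every
 * RECALC_WRITE_SUPER chunks.
 */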
2128static void integrity_recalc(struct work_struct *w)
2129{
2130 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2131 struct dm_integrity_range range;
2132 struct dm_io_request io_req;
2133 struct dm_io_region io_loc;
2134 sector_t area, offset;
2135 sector_t metadata_block;
2136 unsigned metadata_offset;
2137 __u8 *t;
2138 unsigned i;
2139 int r;
2140 unsigned super_counter = 0;
2141
2142 spin_lock_irq(&ic->endio_wait.lock);
2143
2144next_chunk:
2145
2146 if (unlikely(READ_ONCE(ic->suspending)))
2147 goto unlock_ret;
2148
2149 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2150 if (unlikely(range.logical_sector >= ic->provided_data_sectors))
2151 goto unlock_ret;
2152
2153 get_area_and_offset(ic, range.logical_sector, &area, &offset);
2154 range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2155 if (!ic->meta_dev)
2156 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2157
2158 if (unlikely(!add_new_range(ic, &range, true)))
2159 wait_and_add_new_range(ic, &range);
2160
2161 spin_unlock_irq(&ic->endio_wait.lock);
2162
2163 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2164 recalc_write_super(ic);
2165 super_counter = 0;
2166 }
2167
2168 if (unlikely(dm_integrity_failed(ic)))
2169 goto err;
2170
2171 io_req.bi_op = REQ_OP_READ;
2172 io_req.bi_op_flags = 0;
2173 io_req.mem.type = DM_IO_VMA;
2174 io_req.mem.ptr.addr = ic->recalc_buffer;
2175 io_req.notify.fn = NULL;
2176 io_req.client = ic->io;
2177 io_loc.bdev = ic->dev->bdev;
2178 io_loc.sector = get_data_sector(ic, area, offset);
2179 io_loc.count = range.n_sectors;
2180
2181 r = dm_io(&io_req, 1, &io_loc, NULL);
2182 if (unlikely(r)) {
2183 dm_integrity_io_error(ic, "reading data", r);
2184 goto err;
2185 }
2186
2187 t = ic->recalc_tags;
2188 for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
2189 integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2190 t += ic->tag_size;
2191 }
2192
2193 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2194
2195 r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2196 if (unlikely(r)) {
2197 dm_integrity_io_error(ic, "writing tags", r);
2198 goto err;
2199 }
2200
2201 spin_lock_irq(&ic->endio_wait.lock);
2202 remove_range_unlocked(ic, &range);
2203 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2204 goto next_chunk;
2205
2206err:
2207 remove_range(ic, &range);
2208 return;
2209
2210unlock_ret:
2211 spin_unlock_irq(&ic->endio_wait.lock);
2212
2213 recalc_write_super(ic);
2214}
2215
2216static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2217 unsigned n_sections, unsigned char commit_seq)
2218{
2219 unsigned i, j, n;
2220
2221 if (!n_sections)
2222 return;
2223
2224 for (n = 0; n < n_sections; n++) {
2225 i = start_section + n;
2226 wraparound_section(ic, &i);
2227 for (j = 0; j < ic->journal_section_sectors; j++) {
2228 struct journal_sector *js = access_journal(ic, i, j);
2229 memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2230 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2231 }
2232 for (j = 0; j < ic->journal_section_entries; j++) {
2233 struct journal_entry *je = access_journal_entry(ic, i, j);
2234 journal_entry_set_unused(je);
2235 }
2236 }
2237
2238 write_journal(ic, start_section, n_sections);
2239}
2240
2241static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2242{
2243 unsigned char k;
2244 for (k = 0; k < N_COMMIT_IDS; k++) {
2245 if (dm_integrity_commit_id(ic, i, j, k) == id)
2246 return k;
2247 }
2248 dm_integrity_io_error(ic, "journal commit id", -EIO);
2249 return -EIO;
2250}
2251
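/*
 * Journal replay on resume: read (and decrypt) the journal, determine
 * the most recent commit sequence from the per-sector commit ids and
 * replay the sections that were committed but possibly not written
 * back; a journal that cannot be interpreted is reinitialized.
 */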
2252static void replay_journal(struct dm_integrity_c *ic)
2253{
2254 unsigned i, j;
2255 bool used_commit_ids[N_COMMIT_IDS];
2256 unsigned max_commit_id_sections[N_COMMIT_IDS];
2257 unsigned write_start, write_sections;
2258 unsigned continue_section;
2259 bool journal_empty;
2260 unsigned char unused, last_used, want_commit_seq;
2261
2262 if (ic->mode == 'R')
2263 return;
2264
2265 if (ic->journal_uptodate)
2266 return;
2267
2268 last_used = 0;
2269 write_start = 0;
2270
2271 if (!ic->just_formatted) {
2272 DEBUG_print("reading journal\n");
2273 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2274 if (ic->journal_io)
2275 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2276 if (ic->journal_io) {
2277 struct journal_completion crypt_comp;
2278 crypt_comp.ic = ic;
2279 init_completion(&crypt_comp.comp);
2280 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2281 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2282 wait_for_completion(&crypt_comp.comp);
2283 }
2284 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2285 }
2286
2287 if (dm_integrity_failed(ic))
2288 goto clear_journal;
2289
2290 journal_empty = true;
2291 memset(used_commit_ids, 0, sizeof used_commit_ids);
2292 memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2293 for (i = 0; i < ic->journal_sections; i++) {
2294 for (j = 0; j < ic->journal_section_sectors; j++) {
2295 int k;
2296 struct journal_sector *js = access_journal(ic, i, j);
2297 k = find_commit_seq(ic, i, j, js->commit_id);
2298 if (k < 0)
2299 goto clear_journal;
2300 used_commit_ids[k] = true;
2301 max_commit_id_sections[k] = i;
2302 }
2303 if (journal_empty) {
2304 for (j = 0; j < ic->journal_section_entries; j++) {
2305 struct journal_entry *je = access_journal_entry(ic, i, j);
2306 if (!journal_entry_is_unused(je)) {
2307 journal_empty = false;
2308 break;
2309 }
2310 }
2311 }
2312 }
2313
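/*
 * The first unused commit id tells us which sequence was written last:
 * "last_used" is the id just before it. Replay starts right after the
 * last section stamped with last_used; the sections from there on are
 * expected to still carry the previous sequence, and a mismatch marks
 * where a crash interrupted writing.
 */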
2314 if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2315 unused = N_COMMIT_IDS - 1;
2316 while (unused && !used_commit_ids[unused - 1])
2317 unused--;
2318 } else {
2319 for (unused = 0; unused < N_COMMIT_IDS; unused++)
2320 if (!used_commit_ids[unused])
2321 break;
2322 if (unused == N_COMMIT_IDS) {
2323 dm_integrity_io_error(ic, "journal commit ids", -EIO);
2324 goto clear_journal;
2325 }
2326 }
2327 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2328 unused, used_commit_ids[0], used_commit_ids[1],
2329 used_commit_ids[2], used_commit_ids[3]);
2330
2331 last_used = prev_commit_seq(unused);
2332 want_commit_seq = prev_commit_seq(last_used);
2333
2334 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2335 journal_empty = true;
2336
2337 write_start = max_commit_id_sections[last_used] + 1;
2338 if (unlikely(write_start >= ic->journal_sections))
2339 want_commit_seq = next_commit_seq(want_commit_seq);
2340 wraparound_section(ic, &write_start);
2341
2342 i = write_start;
2343 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2344 for (j = 0; j < ic->journal_section_sectors; j++) {
2345 struct journal_sector *js = access_journal(ic, i, j);
2346
2347 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2348 /*
2349 * This could be caused by crash during writing.
2350 * We won't replay the inconsistent part of the
2351 * journal.
2352 */
2353 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2354 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2355 goto brk;
2356 }
2357 }
2358 i++;
2359 if (unlikely(i >= ic->journal_sections))
2360 want_commit_seq = next_commit_seq(want_commit_seq);
2361 wraparound_section(ic, &i);
2362 }
2363brk:
2364
2365 if (!journal_empty) {
2366 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2367 write_sections, write_start, want_commit_seq);
2368 do_journal_write(ic, write_start, write_sections, true);
2369 }
2370
2371 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2372 continue_section = write_start;
2373 ic->commit_seq = want_commit_seq;
2374 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2375 } else {
2376 unsigned s;
2377 unsigned char erase_seq;
2378clear_journal:
2379 DEBUG_print("clearing journal\n");
2380
2381 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2382 s = write_start;
2383 init_journal(ic, s, 1, erase_seq);
2384 s++;
2385 wraparound_section(ic, &s);
2386 if (ic->journal_sections >= 2) {
2387 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2388 s += ic->journal_sections - 2;
2389 wraparound_section(ic, &s);
2390 init_journal(ic, s, 1, erase_seq);
2391 }
2392
2393 continue_section = 0;
2394 ic->commit_seq = next_commit_seq(erase_seq);
2395 }
2396
2397 ic->committed_section = continue_section;
2398 ic->n_committed_sections = 0;
2399
2400 ic->uncommitted_section = continue_section;
2401 ic->n_uncommitted_sections = 0;
2402
2403 ic->free_section = continue_section;
2404 ic->free_section_entry = 0;
2405 ic->free_sectors = ic->journal_entries;
2406
2407 ic->journal_tree_root = RB_ROOT;
2408 for (i = 0; i < ic->journal_entries; i++)
2409 init_journal_node(&ic->journal_tree[i]);
2410}
2411
2412static void dm_integrity_postsuspend(struct dm_target *ti)
2413{
2414 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2415
2416 del_timer_sync(&ic->autocommit_timer);
2417
2418 WRITE_ONCE(ic->suspending, 1);
2419
2420 if (ic->recalc_wq)
2421 drain_workqueue(ic->recalc_wq);
2422
2423 queue_work(ic->commit_wq, &ic->commit_work);
2424 drain_workqueue(ic->commit_wq);
2425
2426 if (ic->mode == 'J') {
2427 if (ic->meta_dev)
2428 queue_work(ic->writer_wq, &ic->writer_work);
2429 drain_workqueue(ic->writer_wq);
2430 dm_integrity_flush_buffers(ic);
2431 }
2432
2433 WRITE_ONCE(ic->suspending, 0);
2434
2435 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2436
2437 ic->journal_uptodate = true;
2438}
2439
2440static void dm_integrity_resume(struct dm_target *ti)
2441{
2442 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2443
2444 replay_journal(ic);
2445
2446 if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2447 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
2448 if (recalc_pos < ic->provided_data_sectors) {
2449 queue_work(ic->recalc_wq, &ic->recalc_work);
2450 } else if (recalc_pos > ic->provided_data_sectors) {
2451 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
2452 recalc_write_super(ic);
2453 }
2454 }
2455}
2456
2457static void dm_integrity_status(struct dm_target *ti, status_type_t type,
2458 unsigned status_flags, char *result, unsigned maxlen)
2459{
2460 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2461 unsigned arg_count;
2462 size_t sz = 0;
2463
2464 switch (type) {
2465 case STATUSTYPE_INFO:
2466 DMEMIT("%llu %llu",
2467 (unsigned long long)atomic64_read(&ic->number_of_mismatches),
2468 (unsigned long long)ic->provided_data_sectors);
2469 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
2470 DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
2471 else
2472 DMEMIT(" -");
2473 break;
2474
2475 case STATUSTYPE_TABLE: {
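/* reconstruct the journal_watermark percentage from the free-sectors threshold, rounded to nearest */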
2476 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
2477 watermark_percentage += ic->journal_entries / 2;
2478 do_div(watermark_percentage, ic->journal_entries);
2479 arg_count = 3;
2480 arg_count += !!ic->meta_dev;
2481 arg_count += ic->sectors_per_block != 1;
2482 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
2483 arg_count += ic->mode == 'J'; /* journal_watermark */
2484 arg_count += ic->mode == 'J'; /* commit_time */
2485 arg_count += !!ic->internal_hash_alg.alg_string;
2486 arg_count += !!ic->journal_crypt_alg.alg_string;
2487 arg_count += !!ic->journal_mac_alg.alg_string;
2488 DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
2489 ic->tag_size, ic->mode, arg_count);
2490 if (ic->meta_dev)
2491 DMEMIT(" meta_device:%s", ic->meta_dev->name);
2492 if (ic->sectors_per_block != 1)
2493 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
2494 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
2495 DMEMIT(" recalculate");
2496 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
2497 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
2498 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
2499 if (ic->mode == 'J') {
2500 DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
2501 DMEMIT(" commit_time:%u", ic->autocommit_msec);
2502 }
2503
2504#define EMIT_ALG(a, n) \
2505 do { \
2506 if (ic->a.alg_string) { \
2507 DMEMIT(" %s:%s", n, ic->a.alg_string); \
2508 if (ic->a.key_string) \
2509 DMEMIT(":%s", ic->a.key_string);\
2510 } \
2511 } while (0)
2512 EMIT_ALG(internal_hash_alg, "internal_hash");
2513 EMIT_ALG(journal_crypt_alg, "journal_crypt");
2514 EMIT_ALG(journal_mac_alg, "journal_mac");
2515 break;
2516 }
2517 }
2518}
2519
2520static int dm_integrity_iterate_devices(struct dm_target *ti,
2521 iterate_devices_callout_fn fn, void *data)
2522{
2523 struct dm_integrity_c *ic = ti->private;
2524
2525 if (!ic->meta_dev)
2526 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
2527 else
2528 return fn(ti, ic->dev, 0, ti->len, data);
2529}
2530
2531static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
2532{
2533 struct dm_integrity_c *ic = ti->private;
2534
2535 if (ic->sectors_per_block > 1) {
2536 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
2537 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
2538 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
2539 }
2540}
2541
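/*
 * A journal section holds JOURNAL_BLOCK_SECTORS sectors of entry
 * metadata (minus a per-sector MAC trailer when journal_mac is used)
 * plus one data block per entry; the entry size is rounded up to
 * JOURNAL_ENTRY_ROUNDUP.
 */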
2542static void calculate_journal_section_size(struct dm_integrity_c *ic)
2543{
2544 unsigned sector_space = JOURNAL_SECTOR_DATA;
2545
2546 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
2547 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
2548 JOURNAL_ENTRY_ROUNDUP);
2549
2550 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
2551 sector_space -= JOURNAL_MAC_PER_SECTOR;
2552 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
2553 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
2554 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
2555 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
2556}
2557
2558static int calculate_device_limits(struct dm_integrity_c *ic)
2559{
2560 __u64 initial_sectors;
2561
2562 calculate_journal_section_size(ic);
2563 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
2564 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
2565 return -EINVAL;
2566 ic->initial_sectors = initial_sectors;
2567
2568 if (!ic->meta_dev) {
2569 sector_t last_sector, last_area, last_offset;
2570
2571 ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
2572 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
2573 if (!(ic->metadata_run & (ic->metadata_run - 1)))
2574 ic->log2_metadata_run = __ffs(ic->metadata_run);
2575 else
2576 ic->log2_metadata_run = -1;
2577
2578 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
2579 last_sector = get_data_sector(ic, last_area, last_offset);
2580 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
2581 return -EINVAL;
2582 } else {
2583 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
2584 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
2585 >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
2586 meta_size <<= ic->log2_buffer_sectors;
2587 if (ic->initial_sectors + meta_size < ic->initial_sectors ||
2588 ic->initial_sectors + meta_size > ic->meta_device_sectors)
2589 return -EINVAL;
2590 ic->metadata_run = 1;
2591 ic->log2_metadata_run = 0;
2592 }
2593
2594 return 0;
2595}
2596
2597static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
2598{
2599 unsigned journal_sections;
2600 int test_bit;
2601
2602 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
2603 memcpy(ic->sb->magic, SB_MAGIC, 8);
2604 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
2605 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
2606 if (ic->journal_mac_alg.alg_string)
2607 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
2608
2609 calculate_journal_section_size(ic);
2610 journal_sections = journal_sectors / ic->journal_section_sectors;
2611 if (!journal_sections)
2612 journal_sections = 1;
2613
2614 if (!ic->meta_dev) {
2615 ic->sb->journal_sections = cpu_to_le32(journal_sections);
2616 if (!interleave_sectors)
2617 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
2618 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
2619 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2620 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2621
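/*
 * Find the largest usable size by trying to set each bit from the top
 * down, keeping it only if calculate_device_limits() still succeeds.
 */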
2622 ic->provided_data_sectors = 0;
2623 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
2624 __u64 prev_data_sectors = ic->provided_data_sectors;
2625
2626 ic->provided_data_sectors |= (sector_t)1 << test_bit;
2627 if (calculate_device_limits(ic))
2628 ic->provided_data_sectors = prev_data_sectors;
2629 }
2630 if (!ic->provided_data_sectors)
2631 return -EINVAL;
2632 } else {
2633 ic->sb->log2_interleave_sectors = 0;
2634 ic->provided_data_sectors = ic->data_device_sectors;
2635 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
2636
2637try_smaller_buffer:
2638 ic->sb->journal_sections = cpu_to_le32(0);
2639 for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
2640 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
2641 __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
2642 if (test_journal_sections > journal_sections)
2643 continue;
2644 ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
2645 if (calculate_device_limits(ic))
2646 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
2647
2648 }
2649 if (!le32_to_cpu(ic->sb->journal_sections)) {
2650 if (ic->log2_buffer_sectors > 3) {
2651 ic->log2_buffer_sectors--;
2652 goto try_smaller_buffer;
2653 }
2654 return -EINVAL;
2655 }
2656 }
2657
2658 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
2659
2660 sb_set_version(ic);
2661
2662 return 0;
2663}
2664
2665static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
2666{
2667 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
2668 struct blk_integrity bi;
2669
2670 memset(&bi, 0, sizeof(bi));
2671 bi.profile = &dm_integrity_profile;
2672 bi.tuple_size = ic->tag_size;
2673 bi.tag_size = bi.tuple_size;
2674 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
2675
2676 blk_integrity_register(disk, &bi);
2677 blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
2678}
2679
2680static void dm_integrity_free_page_list(struct page_list *pl)
2681{
2682 unsigned i;
2683
2684 if (!pl)
2685 return;
2686 for (i = 0; pl[i].page; i++)
2687 __free_page(pl[i].page);
2688 kvfree(pl);
2689}
2690
2691static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
2692{
2693 struct page_list *pl;
2694 unsigned i;
2695
2696 pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
2697 if (!pl)
2698 return NULL;
2699
2700 for (i = 0; i < n_pages; i++) {
2701 pl[i].page = alloc_page(GFP_KERNEL);
2702 if (!pl[i].page) {
2703 dm_integrity_free_page_list(pl);
2704 return NULL;
2705 }
2706 if (i)
2707 pl[i - 1].next = &pl[i];
2708 }
2709 pl[i].page = NULL;
2710 pl[i].next = NULL;
2711
2712 return pl;
2713}
2714
2715static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
2716{
2717 unsigned i;
2718 for (i = 0; i < ic->journal_sections; i++)
2719 kvfree(sl[i]);
2720 kvfree(sl);
2721}
2722
2723static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
2724{
2725 struct scatterlist **sl;
2726 unsigned i;
2727
2728 sl = kvmalloc_array(ic->journal_sections,
2729 sizeof(struct scatterlist *),
2730 GFP_KERNEL | __GFP_ZERO);
2731 if (!sl)
2732 return NULL;
2733
2734 for (i = 0; i < ic->journal_sections; i++) {
2735 struct scatterlist *s;
2736 unsigned start_index, start_offset;
2737 unsigned end_index, end_offset;
2738 unsigned n_pages;
2739 unsigned idx;
2740
2741 page_list_location(ic, i, 0, &start_index, &start_offset);
2742 page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);
2743
2744 n_pages = (end_index - start_index + 1);
2745
2746 s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
2747 GFP_KERNEL);
2748 if (!s) {
2749 dm_integrity_free_journal_scatterlist(ic, sl);
2750 return NULL;
2751 }
2752
2753 sg_init_table(s, n_pages);
2754 for (idx = start_index; idx <= end_index; idx++) {
2755 char *va = lowmem_page_address(pl[idx].page);
2756 unsigned start = 0, end = PAGE_SIZE;
2757 if (idx == start_index)
2758 start = start_offset;
2759 if (idx == end_index)
2760 end = end_offset + (1 << SECTOR_SHIFT);
2761 sg_set_buf(&s[idx - start_index], va + start, end - start);
2762 }
2763
2764 sl[i] = s;
2765 }
2766
2767 return sl;
2768}
2769
2770static void free_alg(struct alg_spec *a)
2771{
2772 kzfree(a->alg_string);
2773 kzfree(a->key);
2774 memset(a, 0, sizeof *a);
2775}
2776
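/*
 * Parse an "option:algorithm[:key]" argument; the key, when present, is
 * a hex string that is decoded into a->key.
 */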
2777static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
2778{
2779 char *k;
2780
2781 free_alg(a);
2782
2783 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
2784 if (!a->alg_string)
2785 goto nomem;
2786
2787 k = strchr(a->alg_string, ':');
2788 if (k) {
2789 *k = 0;
2790 a->key_string = k + 1;
2791 if (strlen(a->key_string) & 1)
2792 goto inval;
2793
2794 a->key_size = strlen(a->key_string) / 2;
2795 a->key = kmalloc(a->key_size, GFP_KERNEL);
2796 if (!a->key)
2797 goto nomem;
2798 if (hex2bin(a->key, a->key_string, a->key_size))
2799 goto inval;
2800 }
2801
2802 return 0;
2803inval:
2804 *error = error_inval;
2805 return -EINVAL;
2806nomem:
2807 *error = "Out of memory for an argument";
2808 return -ENOMEM;
2809}
2810
2811static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
2812 char *error_alg, char *error_key)
2813{
2814 int r;
2815
2816 if (a->alg_string) {
2817 *hash = crypto_alloc_shash(a->alg_string, 0, 0);
2818 if (IS_ERR(*hash)) {
2819 *error = error_alg;
2820 r = PTR_ERR(*hash);
2821 *hash = NULL;
2822 return r;
2823 }
2824
2825 if (a->key) {
2826 r = crypto_shash_setkey(*hash, a->key, a->key_size);
2827 if (r) {
2828 *error = error_key;
2829 return r;
2830 }
2831 } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
2832 *error = error_key;
2833 return -ENOKEY;
2834 }
2835 }
2836
2837 return 0;
2838}
2839
2840static int create_journal(struct dm_integrity_c *ic, char **error)
2841{
2842 int r = 0;
2843 unsigned i;
2844 __u64 journal_pages, journal_desc_size, journal_tree_size;
2845 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
2846 struct skcipher_request *req = NULL;
2847
2848 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2849 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
2850 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
2851 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
2852
2853 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
2854 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
2855 journal_desc_size = journal_pages * sizeof(struct page_list);
2856 if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
2857 *error = "Journal doesn't fit into memory";
2858 r = -ENOMEM;
2859 goto bad;
2860 }
2861 ic->journal_pages = journal_pages;
2862
2863 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
2864 if (!ic->journal) {
2865 *error = "Could not allocate memory for journal";
2866 r = -ENOMEM;
2867 goto bad;
2868 }
2869 if (ic->journal_crypt_alg.alg_string) {
2870 unsigned ivsize, blocksize;
2871 struct journal_completion comp;
2872
2873 comp.ic = ic;
2874 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
2875 if (IS_ERR(ic->journal_crypt)) {
2876 *error = "Invalid journal cipher";
2877 r = PTR_ERR(ic->journal_crypt);
2878 ic->journal_crypt = NULL;
2879 goto bad;
2880 }
2881 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
2882 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
2883
2884 if (ic->journal_crypt_alg.key) {
2885 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
2886 ic->journal_crypt_alg.key_size);
2887 if (r) {
2888 *error = "Error setting encryption key";
2889 goto bad;
2890 }
2891 }
2892 DEBUG_print("cipher %s, block size %u iv size %u\n",
2893 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
2894
2895 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
2896 if (!ic->journal_io) {
2897 *error = "Could not allocate memory for journal io";
2898 r = -ENOMEM;
2899 goto bad;
2900 }
2901
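/*
 * Two cases: with a stream cipher (block size 1) the whole journal area
 * is XORed with a keystream that is generated here once; with a block
 * cipher, a per-section skcipher request with its own derived IV is
 * prepared for encrypting sections on the fly.
 */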
2902 if (blocksize == 1) {
2903 struct scatterlist *sg;
2904
2905 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2906 if (!req) {
2907 *error = "Could not allocate crypt request";
2908 r = -ENOMEM;
2909 goto bad;
2910 }
2911
2912 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2913 if (!crypt_iv) {
2914 *error = "Could not allocate iv";
2915 r = -ENOMEM;
2916 goto bad;
2917 }
2918
2919 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
2920 if (!ic->journal_xor) {
2921 *error = "Could not allocate memory for journal xor";
2922 r = -ENOMEM;
2923 goto bad;
2924 }
2925
2926 sg = kvmalloc_array(ic->journal_pages + 1,
2927 sizeof(struct scatterlist),
2928 GFP_KERNEL);
2929 if (!sg) {
2930 *error = "Unable to allocate sg list";
2931 r = -ENOMEM;
2932 goto bad;
2933 }
2934 sg_init_table(sg, ic->journal_pages + 1);
2935 for (i = 0; i < ic->journal_pages; i++) {
2936 char *va = lowmem_page_address(ic->journal_xor[i].page);
2937 clear_page(va);
2938 sg_set_buf(&sg[i], va, PAGE_SIZE);
2939 }
2940 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
2941 memset(crypt_iv, 0x00, ivsize);
2942
2943 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
2944 init_completion(&comp.comp);
2945 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2946 if (do_crypt(true, req, &comp))
2947 wait_for_completion(&comp.comp);
2948 kvfree(sg);
2949 r = dm_integrity_failed(ic);
2950 if (r) {
2951 *error = "Unable to encrypt journal";
2952 goto bad;
2953 }
2954 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
2955
2956 crypto_free_skcipher(ic->journal_crypt);
2957 ic->journal_crypt = NULL;
2958 } else {
2959 unsigned crypt_len = roundup(ivsize, blocksize);
2960
2961 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2962 if (!req) {
2963 *error = "Could not allocate crypt request";
2964 r = -ENOMEM;
2965 goto bad;
2966 }
2967
2968 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2969 if (!crypt_iv) {
2970 *error = "Could not allocate iv";
2971 r = -ENOMEM;
2972 goto bad;
2973 }
2974
2975 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
2976 if (!crypt_data) {
2977 *error = "Unable to allocate crypt data";
2978 r = -ENOMEM;
2979 goto bad;
2980 }
2981
2982 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
2983 if (!ic->journal_scatterlist) {
2984 *error = "Unable to allocate sg list";
2985 r = -ENOMEM;
2986 goto bad;
2987 }
2988 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
2989 if (!ic->journal_io_scatterlist) {
2990 *error = "Unable to allocate sg list";
2991 r = -ENOMEM;
2992 goto bad;
2993 }
2994 ic->sk_requests = kvmalloc_array(ic->journal_sections,
2995 sizeof(struct skcipher_request *),
2996 GFP_KERNEL | __GFP_ZERO);
2997 if (!ic->sk_requests) {
2998 *error = "Unable to allocate sk requests";
2999 r = -ENOMEM;
3000 goto bad;
3001 }
3002 for (i = 0; i < ic->journal_sections; i++) {
3003 struct scatterlist sg;
3004 struct skcipher_request *section_req;
3005 __u32 section_le = cpu_to_le32(i);
3006
3007 memset(crypt_iv, 0x00, ivsize);
3008 memset(crypt_data, 0x00, crypt_len);
3009 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3010
3011 sg_init_one(&sg, crypt_data, crypt_len);
3012 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3013 init_completion(&comp.comp);
3014 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3015 if (do_crypt(true, req, &comp))
3016 wait_for_completion(&comp.comp);
3017
3018 r = dm_integrity_failed(ic);
3019 if (r) {
3020 *error = "Unable to generate iv";
3021 goto bad;
3022 }
3023
3024 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3025 if (!section_req) {
3026 *error = "Unable to allocate crypt request";
3027 r = -ENOMEM;
3028 goto bad;
3029 }
3030 section_req->iv = kmalloc_array(ivsize, 2,
3031 GFP_KERNEL);
3032 if (!section_req->iv) {
3033 skcipher_request_free(section_req);
3034 *error = "Unable to allocate iv";
3035 r = -ENOMEM;
3036 goto bad;
3037 }
3038 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3039 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3040 ic->sk_requests[i] = section_req;
3041 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3042 }
3043 }
3044 }
3045
3046 for (i = 0; i < N_COMMIT_IDS; i++) {
3047 unsigned j;
3048retest_commit_id:
3049 for (j = 0; j < i; j++) {
3050 if (ic->commit_ids[j] == ic->commit_ids[i]) {
3051 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3052 goto retest_commit_id;
3053 }
3054 }
3055 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3056 }
3057
3058 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3059 if (journal_tree_size > ULONG_MAX) {
3060 *error = "Journal doesn't fit into memory";
3061 r = -ENOMEM;
3062 goto bad;
3063 }
3064 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3065 if (!ic->journal_tree) {
3066 *error = "Could not allocate memory for journal tree";
3067 r = -ENOMEM;
3068 }
3069bad:
3070 kfree(crypt_data);
3071 kfree(crypt_iv);
3072 skcipher_request_free(req);
3073
3074 return r;
3075}
3076
3077/*
3078 * Construct an integrity mapping
3079 *
3080 * Arguments:
3081 * device
3082 * offset from the start of the device
3083 * tag size
3084 * D - direct writes, J - journal writes, R - recovery mode
3085 * number of optional arguments
3086 * optional arguments:
3087 * journal_sectors
3088 * interleave_sectors
3089 * buffer_sectors
3090 * journal_watermark
3091 * commit_time
3092 * meta_device
3093 * block_size
3094 * internal_hash
3095 * journal_crypt
3096 * journal_mac
3097 * recalculate
3098 */
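/*
 * For illustration only (device name and sizes below are made up): a
 * journal-mode target with crc32c as the internal hash, letting the tag
 * size default to the hash digest size, could be created with a table
 * line like
 *
 *	dmsetup create integ --table \
 *		"0 <provided_data_sectors> integrity /dev/sdX 0 - J 2 \
 *		journal_sectors:1024 internal_hash:crc32c"
 */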
3099static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3100{
3101 struct dm_integrity_c *ic;
3102 char dummy;
3103 int r;
3104 unsigned extra_args;
3105 struct dm_arg_set as;
3106 static const struct dm_arg _args[] = {
3107 {0, 9, "Invalid number of feature args"},
3108 };
3109 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3110 bool recalculate;
3111 bool should_write_sb;
3112 __u64 threshold;
3113 unsigned long long start;
3114
3115#define DIRECT_ARGUMENTS 4
3116
3117 if (argc <= DIRECT_ARGUMENTS) {
3118 ti->error = "Invalid argument count";
3119 return -EINVAL;
3120 }
3121
3122 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3123 if (!ic) {
3124 ti->error = "Cannot allocate integrity context";
3125 return -ENOMEM;
3126 }
3127 ti->private = ic;
3128 ti->per_io_data_size = sizeof(struct dm_integrity_io);
3129
3130 ic->in_progress = RB_ROOT;
3131 INIT_LIST_HEAD(&ic->wait_list);
3132 init_waitqueue_head(&ic->endio_wait);
3133 bio_list_init(&ic->flush_bio_list);
3134 init_waitqueue_head(&ic->copy_to_journal_wait);
3135 init_completion(&ic->crypto_backoff);
3136 atomic64_set(&ic->number_of_mismatches, 0);
3137
3138 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3139 if (r) {
3140 ti->error = "Device lookup failed";
3141 goto bad;
3142 }
3143
3144 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3145 ti->error = "Invalid starting offset";
3146 r = -EINVAL;
3147 goto bad;
3148 }
3149 ic->start = start;
3150
3151 if (strcmp(argv[2], "-")) {
3152 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3153 ti->error = "Invalid tag size";
3154 r = -EINVAL;
3155 goto bad;
3156 }
3157 }
3158
3159 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
3160 ic->mode = argv[3][0];
3161 else {
3162 ti->error = "Invalid mode (expecting J, D, R)";
3163 r = -EINVAL;
3164 goto bad;
3165 }
3166
3167 journal_sectors = 0;
3168 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3169 buffer_sectors = DEFAULT_BUFFER_SECTORS;
3170 journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3171 sync_msec = DEFAULT_SYNC_MSEC;
3172 recalculate = false;
3173 ic->sectors_per_block = 1;
3174
3175 as.argc = argc - DIRECT_ARGUMENTS;
3176 as.argv = argv + DIRECT_ARGUMENTS;
3177 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
3178 if (r)
3179 goto bad;
3180
3181 while (extra_args--) {
3182 const char *opt_string;
3183 unsigned val;
3184 opt_string = dm_shift_arg(&as);
3185 if (!opt_string) {
3186 r = -EINVAL;
3187 ti->error = "Not enough feature arguments";
3188 goto bad;
3189 }
3190 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
3191 journal_sectors = val ? val : 1;
3192 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
3193 interleave_sectors = val;
3194 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
3195 buffer_sectors = val;
3196 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
3197 journal_watermark = val;
3198 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
3199 sync_msec = val;
3200 else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
3201 if (ic->meta_dev) {
3202 dm_put_device(ti, ic->meta_dev);
3203 ic->meta_dev = NULL;
3204 }
3205 r = dm_get_device(ti, strchr(opt_string, ':') + 1, dm_table_get_mode(ti->table), &ic->meta_dev);
3206 if (r) {
3207 ti->error = "Device lookup failed";
3208 goto bad;
3209 }
3210 } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
3211 if (val < 1 << SECTOR_SHIFT ||
3212 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
3213 (val & (val - 1))) {
3214 r = -EINVAL;
3215 ti->error = "Invalid block_size argument";
3216 goto bad;
3217 }
3218 ic->sectors_per_block = val >> SECTOR_SHIFT;
3219 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
3220 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3221 "Invalid internal_hash argument");
3222 if (r)
3223 goto bad;
3224 } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
3225 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3226 "Invalid journal_crypt argument");
3227 if (r)
3228 goto bad;
3229 } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
3230 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
3231 "Invalid journal_mac argument");
3232 if (r)
3233 goto bad;
3234 } else if (!strcmp(opt_string, "recalculate")) {
3235 recalculate = true;
3236 } else {
3237 r = -EINVAL;
3238 ti->error = "Invalid argument";
3239 goto bad;
3240 }
3241 }
3242
3243 ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
3244 if (!ic->meta_dev)
3245 ic->meta_device_sectors = ic->data_device_sectors;
3246 else
3247 ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
3248
3249 if (!journal_sectors) {
3250 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
3251 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
3252 }
3253
3254 if (!buffer_sectors)
3255 buffer_sectors = 1;
3256 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
3257
3258 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
3259 "Invalid internal hash", "Error setting internal hash key");
3260 if (r)
3261 goto bad;
3262
3263 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
3264 "Invalid journal mac", "Error setting journal mac key");
3265 if (r)
3266 goto bad;
3267
3268 if (!ic->tag_size) {
3269 if (!ic->internal_hash) {
3270 ti->error = "Unknown tag size";
3271 r = -EINVAL;
3272 goto bad;
3273 }
3274 ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
3275 }
3276 if (ic->tag_size > MAX_TAG_SIZE) {
3277 ti->error = "Too big tag size";
3278 r = -EINVAL;
3279 goto bad;
3280 }
3281 if (!(ic->tag_size & (ic->tag_size - 1)))
3282 ic->log2_tag_size = __ffs(ic->tag_size);
3283 else
3284 ic->log2_tag_size = -1;
3285
3286 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
3287 ic->autocommit_msec = sync_msec;
3288 timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
3289
3290 ic->io = dm_io_client_create();
3291 if (IS_ERR(ic->io)) {
3292 r = PTR_ERR(ic->io);
3293 ic->io = NULL;
3294 ti->error = "Cannot allocate dm io";
3295 goto bad;
3296 }
3297
3298 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
3299 if (r) {
3300 ti->error = "Cannot allocate mempool";
3301 goto bad;
3302 }
3303
3304 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
3305 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
3306 if (!ic->metadata_wq) {
3307 ti->error = "Cannot allocate workqueue";
3308 r = -ENOMEM;
3309 goto bad;
3310 }
3311
3312 /*
3313 * If this workqueue were percpu, it would cause bio reordering
3314 * and reduced performance.
3315 */
3316 ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
3317 if (!ic->wait_wq) {
3318 ti->error = "Cannot allocate workqueue";
3319 r = -ENOMEM;
3320 goto bad;
3321 }
3322
3323 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
3324 if (!ic->commit_wq) {
3325 ti->error = "Cannot allocate workqueue";
3326 r = -ENOMEM;
3327 goto bad;
3328 }
3329 INIT_WORK(&ic->commit_work, integrity_commit);
3330
3331 if (ic->mode == 'J') {
3332 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
3333 if (!ic->writer_wq) {
3334 ti->error = "Cannot allocate workqueue";
3335 r = -ENOMEM;
3336 goto bad;
3337 }
3338 INIT_WORK(&ic->writer_work, integrity_writer);
3339 }
3340
3341 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
3342 if (!ic->sb) {
3343 r = -ENOMEM;
3344 ti->error = "Cannot allocate superblock area";
3345 goto bad;
3346 }
3347
3348 r = sync_rw_sb(ic, REQ_OP_READ, 0);
3349 if (r) {
3350 ti->error = "Error reading superblock";
3351 goto bad;
3352 }
3353 should_write_sb = false;
3354 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
3355 if (ic->mode != 'R') {
3356 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
3357 r = -EINVAL;
3358 ti->error = "The device is not initialized";
3359 goto bad;
3360 }
3361 }
3362
3363 r = initialize_superblock(ic, journal_sectors, interleave_sectors);
3364 if (r) {
3365 ti->error = "Could not initialize superblock";
3366 goto bad;
3367 }
3368 if (ic->mode != 'R')
3369 should_write_sb = true;
3370 }
3371
3372 if (!ic->sb->version || ic->sb->version > SB_VERSION_2) {
3373 r = -EINVAL;
3374 ti->error = "Unknown version";
3375 goto bad;
3376 }
3377 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
3378 r = -EINVAL;
3379 ti->error = "Tag size doesn't match the information in superblock";
3380 goto bad;
3381 }
3382 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
3383 r = -EINVAL;
3384 ti->error = "Block size doesn't match the information in superblock";
3385 goto bad;
3386 }
3387 if (!le32_to_cpu(ic->sb->journal_sections)) {
3388 r = -EINVAL;
3389 ti->error = "Corrupted superblock, journal_sections is 0";
3390 goto bad;
3391 }
7eada909 3392 /* make sure that ti->max_io_len doesn't overflow */
3393 if (!ic->meta_dev) {
3394 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
3395 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
3396 r = -EINVAL;
3397 ti->error = "Invalid interleave_sectors in the superblock";
3398 goto bad;
3399 }
3400 } else {
3401 if (ic->sb->log2_interleave_sectors) {
3402 r = -EINVAL;
3403 ti->error = "Invalid interleave_sectors in the superblock";
3404 goto bad;
3405 }
3406 }
3407 ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3408 if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
3409 /* test for overflow */
3410 r = -EINVAL;
3411 ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
3412 goto bad;
3413 }
3414 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
3415 r = -EINVAL;
3416 ti->error = "Journal mac mismatch";
3417 goto bad;
3418 }
3419
3420try_smaller_buffer:
7eada909
MP
3421 r = calculate_device_limits(ic);
3422 if (r) {
3423 if (ic->meta_dev) {
3424 if (ic->log2_buffer_sectors > 3) {
3425 ic->log2_buffer_sectors--;
3426 goto try_smaller_buffer;
3427 }
3428 }
3429 ti->error = "The device is too small";
3430 goto bad;
3431 }
3432 if (!ic->meta_dev)
3433 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
3434
3435 if (ti->len > ic->provided_data_sectors) {
3436 r = -EINVAL;
3437 ti->error = "Not enough provided sectors for requested mapping size";
3438 goto bad;
3439 }
3440
3441
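/*
 * Convert the journal_watermark percentage into an absolute number of
 * journal entries (rounded to nearest); the writer is kicked once free
 * journal space falls below this threshold.
 */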
3442 threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
3443 threshold += 50;
3444 do_div(threshold, 100);
3445 ic->free_sectors_threshold = threshold;
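	/*
	 * The computation above rounds to the nearest entry: e.g. with 10000
	 * journal entries and the default watermark of 50 %, threshold =
	 * (10000 * (100 - 50) + 50) / 100 = 5000, so journal commit work is
	 * scheduled once free journal space drops below half.
	 */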

	DEBUG_print("initialized:\n");
	DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print(" journal_entries %u\n", ic->journal_entries);
	DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print(" data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors);
	DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
		    (unsigned long long)ic->provided_data_sectors);
	DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);

	if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		if (!ic->internal_hash) {
			r = -EINVAL;
			ti->error = "Recalculate is only valid with internal hash";
			goto bad;
		}
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
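		/*
		 * Each recalculation pass covers RECALC_SECTORS (8192) sectors:
		 * a 4 MiB data buffer plus one tag per block, e.g. 8192 * 4 =
		 * 32 KiB of tags with 512-byte blocks and 4-byte tags.
		 */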
		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
		if (!ic->recalc_buffer) {
			ti->error = "Cannot allocate buffer for recalculating";
			r = -ENOMEM;
			goto bad;
		}
		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
						 ic->tag_size, GFP_KERNEL);
		if (!ic->recalc_tags) {
			ti->error = "Cannot allocate tags for recalculating";
			r = -ENOMEM;
			goto bad;
		}
	}

	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
					   1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
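	/*
	 * bufio is used for the tag area only; offsetting it by ic->start +
	 * ic->initial_sectors skips the superblock and the journal, so buffer
	 * index 0 is the first metadata block.
	 */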

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}
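	/*
	 * When formatting, ordering matters: the journal is initialized first
	 * and the superblock is written last with REQ_FUA, so a crash in
	 * between leaves the magic unset and the device still looks
	 * uninitialized on the next activation.
	 */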

	if (should_write_sb) {
		int r;

		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}
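	/*
	 * Splitting bios at interleave boundaries keeps each bio inside one
	 * data area, so it maps to a single contiguous run of tags.
	 */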

	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	return 0;

bad:
	dm_integrity_dtr(ti);
	return r;
}
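
/*
 * Example of activating the target with dmsetup (a sketch only: the device
 * name, sector counts and option values below are illustrative, not values
 * required by this target):
 *
 *   dmsetup create integr --table \
 *     "0 1953120 integrity /dev/sdb 0 4 J 2 journal_sectors:1024 internal_hash:crc32c"
 *
 * i.e. <start> <length> integrity <device> <offset> <tag_size> <mode>
 * <#opt_args> <opt_args>..., matching the argument parsing in
 * dm_integrity_ctr() above. In practice the device is usually set up via
 * integritysetup from the cryptsetup package.
 */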

static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

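	/*
	 * This function is also the error path of dm_integrity_ctr() ("goto
	 * bad"), so every resource is checked before being torn down: any
	 * field may still be NULL if the constructor failed part-way.
	 */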
	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	vfree(ic->recalc_buffer);
	kvfree(ic->recalc_tags);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
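	/*
	 * The request IVs feed the journal cipher, so they are released with
	 * kzfree(), which zeroes the buffer before freeing it.
	 */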
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kzfree(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}
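
/*
 * DM_TARGET_SINGLETON means an "integrity" target must be the only target in
 * its table; DM_TARGET_INTEGRITY marks a target that implements its own bio
 * data integrity.
 */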

static struct target_type integrity_target = {
	.name			= "integrity",
	.version		= {1, 2, 0},
	.module			= THIS_MODULE,
	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr			= dm_integrity_ctr,
	.dtr			= dm_integrity_dtr,
	.map			= dm_integrity_map,
	.postsuspend		= dm_integrity_postsuspend,
	.resume			= dm_integrity_resume,
	.status			= dm_integrity_status,
	.iterate_devices	= dm_integrity_iterate_devices,
	.io_hints		= dm_integrity_io_hints,
};

static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");