// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#include "dm-audit.h"

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	(IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			(IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
#define DISCARD_FILLER			0xf6

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel.
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_VERSION_4			4
#define SB_VERSION_5			5

#define MAX_SECTORS_PER_BLOCK		8
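/*
 * On-disk superblock fields: all multi-byte members use fixed
 * little-endian types (__le16/__le32/__le64), so the metadata format
 * does not depend on host endianness; single-byte __u8 members need no
 * conversion.
 */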
	__u8 log2_interleave_sectors;
	__le16 integrity_tag_size;
	__le32 journal_sections;
	__le64 provided_data_sectors;	/* userspace uses this value */
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4
#define SB_FLAG_FIXED_PADDING		0x8
#define SB_FLAG_FIXED_HMAC		0x10

#define JOURNAL_ENTRY_ROUNDUP		8

typedef __le64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__le32 sector_lo;
			__le32 sector_hi;
		} s;
		__le64 sector;
	} u;
	commit_id_t last_bytes[];
};

#define journal_entry_tag(ic, je)	((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)	do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)	do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)	le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)	((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)	((je)->u.s.sector_hi = cpu_to_le32(-1))
#define journal_entry_is_inprogress(je)	((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je) ((je)->u.s.sector_hi = cpu_to_le32(-2))

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	struct_group(sectors,
		__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
		__u8 mac[JOURNAL_MAC_PER_SECTOR];
	);
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}
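/*
 * Example: with N_COMMIT_IDS == 4 the commit sequence number cycles
 * 0 -> 1 -> 2 -> 3 -> 0, so next_commit_seq(3) == 0 and
 * prev_commit_seq(0) == 3.
 */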
/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned int key_size;
};

struct dm_integrity_c {
	struct dm_dev *meta_dev;
	unsigned int tag_size;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned int journal_pages;
	unsigned int n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned int bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned int journal_sections;
	unsigned int journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned int initial_sectors;
	unsigned int metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned int committed_section;
	unsigned int n_committed_sections;

	unsigned int uncommitted_section;
	unsigned int n_uncommitted_sections;

	unsigned int free_section;
	unsigned char free_section_entry;
	unsigned int free_sectors;

	unsigned int free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned int autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool wrote_to_journal;
	bool journal_uptodate;
	bool recalculate_flag;
	bool reset_recalculate_flag;
	bool legacy_recalculate;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	mempool_t recheck_pool;

	struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	enum req_op op;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned int metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned int idx;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)			printk(KERN_DEBUG x, ##__VA_ARGS__)
#define DEBUG_bytes(bytes, len, msg, ...)	printk(KERN_DEBUG msg "%s%*ph\n", ##__VA_ARGS__, \
						       len ? ": " : "", len, bytes)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile; protection is performed in the layer above (dm-crypt).
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
	.prepare_fn		= dm_integrity_prepare,
	.complete_fn		= dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
	if (ic->legacy_recalculate)
		return false;
	if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
	    ic->internal_hash_alg.key || ic->journal_mac_alg.key :
	    ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
		return true;
	return false;
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i,
					  unsigned int j, unsigned char seq)
{
	/*
	 * Xor the number with section and sector, so that if a piece of
	 * journal is written at the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;

		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}
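/*
 * Example: with the default interleave of 32768 sectors
 * (log2_interleave_sectors == 15) and no separate metadata device,
 * data_sector 100000 yields area 3 (100000 >> 15) and offset 1696
 * (100000 - 3 * 32768). With a separate metadata device there is a
 * single area and the offset is the sector itself.
 */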
#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1));	\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned int *metadata_offset)
{
	__u64 ms;
	unsigned int mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}
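/*
 * Both results are expressed in dm-bufio units: "ms" counts metadata
 * blocks of (1 << log2_buffer_sectors) sectors (hence the
 * ms >>= log2_buffer_sectors above) and "mo" is the byte offset of the
 * first tag inside that block. Power-of-two tag sizes take the shift
 * path, arbitrary sizes the multiply path.
 */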
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	return result;
}
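/*
 * Interleaved layout on a single device: after "initial_sectors"
 * (superblock plus journal), metadata run N is placed immediately
 * before data area N, so the data for (area, offset) sits behind
 * area + 1 metadata runs and "area" full data areas.
 */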
static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
		ic->sb->version = SB_VERSION_5;
	else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}
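/*
 * The version written is the lowest one that still describes every
 * feature in use, so images created without the newer features remain
 * readable by older kernels.
 */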
static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned int mac_size = crypto_shash_digestsize(ic->journal_mac);
	__u8 *sb = (__u8 *)ic->sb;
	__u8 *mac = sb + (1 << SECTOR_SHIFT) - mac_size;

	if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT) {
		dm_integrity_io_error(ic, "digest is too long", -EINVAL);
		return -EINVAL;
	}

	desc->tfm = ic->journal_mac;

	if (likely(wr)) {
		r = crypto_shash_digest(desc, sb, mac - sb, mac);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_digest", r);
			return r;
		}
	} else {
		__u8 actual_mac[HASH_MAX_DIGESTSIZE];

		r = crypto_shash_digest(desc, sb, mac - sb, actual_mac);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_digest", r);
			return r;
		}
		if (memcmp(mac, actual_mac, mac_size)) {
			dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
			dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
			return -EILSEQ;
		}
	}

	return 0;
}

static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	const enum req_op op = opf & REQ_OP_MASK;
	int r;

	io_req.bi_opf = opf;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE) {
		sb_set_version(ic);
		if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, true);
			if (unlikely(r))
				return r;
		}
	}

	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
	if (unlikely(r))
		return r;

	if (op == REQ_OP_READ) {
		if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, false);
			if (unlikely(r))
				return r;
		}
	}

	return 0;
}

#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3
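/*
 * Usage sketch (illustrative, not a call lifted from this excerpt):
 * marking a range of data sectors dirty in bitmap mode would look like
 *
 *	block_bitmap_op(ic, ic->may_write_bitmap, sec, n_sectors, BITMAP_OP_SET);
 *
 * The TEST_ALL_* modes return the result of the test; SET and CLEAR
 * always return true.
 */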
static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
			sector,
			n_sectors,
			ic->sb->log2_sectors_per_block,
			ic->log2_blocks_per_bitmap_bit,
			mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;
	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page)
		this_end_bit = PAGE_SIZE * 8 - 1;
	else
		this_end_bit = end_bit;

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else {
			while (bit <= this_end_bit) {
				if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
					do {
						data[bit / BITS_PER_LONG] = 0;
						bit += BITS_PER_LONG;
					} while (this_end_bit >= bit + BITS_PER_LONG - 1);
					continue;
				}
				__clear_bit(bit, data);
				bit++;
			}
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned int i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);

		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
			       unsigned int *pl_index, unsigned int *pl_offset)
{
	unsigned int sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}
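/*
 * Example: with 4 KiB pages a page holds 8 journal sectors, so journal
 * sector s lives in page s >> 3 at byte offset (s & 7) << 9.
 */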
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned int section, unsigned int offset, unsigned int *n_sectors)
{
	unsigned int pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
	unsigned int rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}
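/*
 * Each journal section therefore starts with JOURNAL_BLOCK_SECTORS
 * sectors holding the packed journal_entry array (entry n is stored in
 * sector n % 8 at slot n / 8), followed by the buffered data blocks.
 */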
static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned int j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		__le64 section_le;

		r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}

		section_le = cpu_to_le64(section);
		r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof(section_le));
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);

		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof(je->u.sector));
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}
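/*
 * The per-section MAC is always JOURNAL_MAC_SIZE (64) bytes: shorter
 * digests are zero-padded, longer ones truncated. rw_section_mac()
 * below spreads it over the section at JOURNAL_MAC_PER_SECTOR (8)
 * bytes per metadata sector.
 */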
static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned int j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
				dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
			}
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;

	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			unsigned int n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned int pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned int dummy;

			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (n_sections)
				page_list_location(ic, section, 0, &section_index, &dummy);
			else
				section_index = pl_index + 1;
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

static void complete_journal_encrypt(void *data, int err)
{
	struct journal_completion *comp = data;

	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}
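/*
 * Crypto API convention used above: -EINPROGRESS means the request was
 * queued and complete_journal_encrypt() will run later; -EBUSY with
 * CRYPTO_TFM_REQ_MAY_BACKLOG means it was backlogged and the callback
 * first fires with err == -EINPROGRESS (completing crypto_backoff) once
 * the engine accepts it, so we throttle here before submitting more.
 */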
static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			  unsigned int n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned int ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			    unsigned int n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;

	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
			       unsigned int sector, unsigned int n_sectors,
			       struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned int pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_opf = opf;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
				      "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
		       unsigned int section, unsigned int n_sections,
		       struct journal_completion *comp)
{
	unsigned int sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, opf, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned int i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned int to_end;

		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE | REQ_FUA,
					   commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}
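/*
 * A commit range that wraps past the end of the journal area is written
 * as two discontiguous I/Os, which is why io_comp.in_flight starts at 2
 * in that branch; the second encryption pass is overlapped with the
 * first journal write when the crypto engine has not finished yet.
 */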
static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
			      unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned int sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_opf = REQ_OP_WRITE;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}
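/*
 * Example: [0, 8) and [4, 12) overlap, [0, 8) and [8, 16) do not; the
 * intervals are half-open, in units of sectors.
 */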
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;

		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector)
			n = &range->node.rb_left;
		else if (new_range->logical_sector >= range->logical_sector + range->n_sectors)
			n = &range->node.rb_right;
		else
			return false;
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;

		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}
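/*
 * Range-locking protocol: a request inserts the sectors it touches into
 * the in_progress rb-tree under endio_wait.lock; anything overlapping
 * parks itself on wait_list and is retried in FIFO order by
 * remove_range_unlocked() once the conflicting range is removed.
 */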
static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;

		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned int found = NOT_FOUND;

	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);

		if (sector == j->sector)
			found = j - ic->journal_tree;

		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned int next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned int *metadata_offset, unsigned int total_size, int op)
{
#define MAY_BE_FILLER		1
#define MAY_BE_HASH		2
	unsigned int hash_offset = 0;
	unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned int to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			if (memcmp(dp, tag, to_copy)) {
				memcpy(dp, tag, to_copy);
				dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
			}
		} else {
			/* e.g.: op == TAG_CMP */

			if (likely(is_power_of_2(ic->tag_size))) {
				if (unlikely(memcmp(dp, tag, to_copy)))
					if (unlikely(!ic->discard) ||
					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
						goto thorough_test;
					}
			} else {
				unsigned int i, ts;
thorough_test:
				ts = total_size;

				for (i = 0; i < to_copy; i++, ts--) {
					if (unlikely(dp[i] != tag[i]))
						may_be &= ~MAY_BE_HASH;
					if (likely(dp[i] != DISCARD_FILLER))
						may_be &= ~MAY_BE_FILLER;
					hash_offset++;
					if (unlikely(hash_offset == ic->tag_size)) {
						if (unlikely(!may_be)) {
							dm_bufio_release(b);
							return ts;
						}
						hash_offset = 0;
						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
					}
				}
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}

		if (unlikely(!is_power_of_2(ic->tag_size)))
			hash_offset = (hash_offset + to_copy) % ic->tag_size;

		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}
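/*
 * Tag-area walker: TAG_READ copies tags out of the metadata buffers,
 * TAG_WRITE copies them in (dirtying only buffers whose contents
 * actually changed), TAG_CMP compares. With discards enabled a stored
 * tag may also legitimately be all DISCARD_FILLER bytes; the MAY_BE_*
 * bits track both possibilities per tag. A positive return value
 * (roughly the number of bytes still outstanding at the mismatch)
 * signals a failed compare; negative values are errors.
 */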
struct flush_request {
	struct dm_io_request io_req;
	struct dm_io_region io_reg;
	struct dm_integrity_c *ic;
	struct completion comp;
};

static void flush_notify(unsigned long error, void *fr_)
{
	struct flush_request *fr = fr_;

	if (unlikely(error != 0))
		dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
	complete(&fr->comp);
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
	int r;
	struct flush_request fr;

	if (!ic->meta_dev)
		flush_data = false;
	if (flush_data) {
		fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		fr.io_req.mem.type = DM_IO_KMEM;
		fr.io_req.mem.ptr.addr = NULL;
		fr.io_req.notify.fn = flush_notify;
		fr.io_req.notify.context = &fr;
		fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
		fr.io_reg.bdev = ic->dev->bdev;
		fr.io_reg.sector = 0;
		fr.io_reg.count = 0;
		fr.ic = ic;
		init_completion(&fr.comp);
		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
		BUG_ON(r);
	}

	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);

	if (flush_data)
		wait_for_completion(&fr.comp);
}
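/*
 * With a separate metadata device, the data-device cache flush (the
 * empty bio with REQ_PREFLUSH above) runs asynchronously while the
 * dirty tag buffers are written to the metadata device; the completion
 * is only awaited at the end, so the two flushes proceed in parallel.
 */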
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r;

	r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;

		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dm_bio_restore(&dio->bio_details, bio);
	if (bio->bi_integrity)
		bio->bi_opf |= REQ_INTEGRITY;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__le64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned int digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto failed;
		}
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof(sector_le));
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}
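/*
 * Poisoning the result with random bytes on failure makes any later tag
 * comparison fail closed instead of silently matching stale data.
 */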
static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	struct dm_integrity_c *ic = dio->ic;
	struct bvec_iter iter;
	struct bio_vec bv;
	sector_t sector, logical_sector, area, offset;
	struct page *page;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
							     &dio->metadata_offset);
	sector = get_data_sector(ic, area, offset);
	logical_sector = dio->range.logical_sector;

	page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);

	__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
		unsigned int pos = 0;

		do {
			sector_t alignment;
			char *mem;
			char *buffer = page_to_virt(page);
			int r;
			struct dm_io_request io_req;
			struct dm_io_region io_loc;

			io_req.bi_opf = REQ_OP_READ;
			io_req.mem.type = DM_IO_KMEM;
			io_req.mem.ptr.addr = buffer;
			io_req.notify.fn = NULL;
			io_req.client = ic->io;
			io_loc.bdev = ic->dev->bdev;
			io_loc.sector = sector;
			io_loc.count = ic->sectors_per_block;

			/* Align the bio to logical block size */
			alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
			alignment &= -alignment;
			io_loc.sector = round_down(io_loc.sector, alignment);
			io_loc.count += sector - io_loc.sector;
			buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
			io_loc.count = round_up(io_loc.count, alignment);

			r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
			if (unlikely(r)) {
				dio->bi_status = errno_to_blk_status(r);
				goto free_ret;
			}

			integrity_sector_checksum(ic, logical_sector, buffer, checksum);
			r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
						&dio->metadata_offset, ic->tag_size, TAG_CMP);
			if (r) {
				if (r > 0) {
					DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
						    bio->bi_bdev, logical_sector);
					atomic64_inc(&ic->number_of_mismatches);
					dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
							 bio, logical_sector, 0);
					r = -EILSEQ;
				}
				dio->bi_status = errno_to_blk_status(r);
				goto free_ret;
			}

			mem = bvec_kmap_local(&bv);
			memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
			kunmap_local(mem);

			pos += ic->sectors_per_block << SECTOR_SHIFT;
			sector += ic->sectors_per_block;
			logical_sector += ic->sectors_per_block;
		} while (pos < bv.bv_len);
	}
free_ret:
	mempool_free(page, &ic->recheck_pool);
}
static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;
	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
		sector_t sector;
		unsigned int sectors_to_process;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		if (likely(dio->op != REQ_OP_DISCARD))
			checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
					    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		else
			checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		if (unlikely(dio->op == REQ_OP_DISCARD)) {
			unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
			unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
			unsigned int max_blocks = max_size / ic->tag_size;

			memset(checksums, DISCARD_FILLER, max_size);

			while (bi_size) {
				unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);

				this_step_blocks = min(this_step_blocks, max_blocks);
				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
							this_step_blocks * ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					if (likely(checksums != checksums_onstack))
						kfree(checksums);
					goto error;
				}

				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
			}

			if (likely(checksums != checksums_onstack))
				kfree(checksums);
			goto skip_io;
		}

		sector = dio->range.logical_sector;
		sectors_to_process = dio->range.n_sectors;

		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
			struct bio_vec bv_copy = bv;
			unsigned int pos;
			char *mem, *checksums_ptr;

again:
			mem = bvec_kmap_local(&bv_copy);
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_local(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				if (r > 0) {
					integrity_recheck(dio, checksums_onstack);
					goto skip_io;
				}
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv_copy.bv_len)) {
				bv_copy.bv_offset += pos;
				bv_copy.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned int data_to_process = dio->range.n_sectors;

			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned int this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = bvec_virt(&biv);
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;
	dio->op = bio_op(bio);

	if (unlikely(dio->op == REQ_OP_DISCARD)) {
		if (ti->max_io_len) {
			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
			unsigned int log2_max_io_len = __fls(ti->max_io_len);
			sector_t start_boundary = sec >> log2_max_io_len;
			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;

			if (start_boundary < end_boundary) {
				sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));

				dm_accept_partial_bio(bio, len);
			}
		}
	}

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      dio->range.logical_sector, bio_sectors(bio),
		      ic->provided_data_sectors);
		return DM_MAPIO_KILL;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      dio->range.logical_sector, bio_sectors(bio));
		return DM_MAPIO_KILL;
	}

	if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
		struct bvec_iter iter;
		struct bio_vec bv;

		bio_for_each_segment(bv, bio, iter) {
			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
				      bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return DM_MAPIO_KILL;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;

			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u",
				      bip->bip_iter.bi_size, wanted_tag_size);
				return DM_MAPIO_KILL;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return DM_MAPIO_KILL;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
		return DM_MAPIO_KILL;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}
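/*
 * At this point the bio has been validated, its logical sector has been
 * remapped to the data-device sector and the matching tag location is
 * cached in the per-bio dm_integrity_io; everything else happens in
 * dm_integrity_map_continue().
 */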
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned int journal_section, unsigned int journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned int n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_local_page(bv.bv_page);
		if (likely(dio->op == REQ_OP_WRITE))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(dio->op == REQ_OP_READ)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned int s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_local(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
							    logical_sector);
						dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
								 bio, logical_sector, 0);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned int tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) {
					do {
						struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
						unsigned int tag_now = min(biv.bv_len, tag_todo);
						char *tag_addr;

						BUG_ON(PageHighMem(biv.bv_page));
						tag_addr = bvec_virt(&biv);
						if (likely(dio->op == REQ_OP_WRITE))
							memcpy(tag_ptr, tag_addr, tag_now);
						else
							memcpy(tag_addr, tag_ptr, tag_now);
						bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
						tag_ptr += tag_now;
						tag_todo -= tag_now;
					} while (unlikely(tag_todo));
				} else if (likely(dio->op == REQ_OP_WRITE))
					memset(tag_ptr, 0, tag_todo);
			}

			if (likely(dio->op == REQ_OP_WRITE)) {
				struct journal_sector *js;
				unsigned int s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);

					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[HASH_MAX_DIGESTSIZE];

						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(dio->op == REQ_OP_READ))
			flush_dcache_page(bv.bv_page);
		kunmap_local(mem);
	} while (n_sectors);

	if (likely(dio->op == REQ_OP_WRITE)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
			queue_work(ic->commit_wq, &ic->commit_work);
		else
			schedule_autocommit(ic);
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}
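/*
 * A "true" return means the journal covered only part of the bio;
 * dm_integrity_map_continue() then loops back with the updated
 * dio->range to place the remainder.
 */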
2170 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
2172 struct dm_integrity_c *ic = dio->ic;
2173 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2174 unsigned int journal_section, journal_entry;
2175 unsigned int journal_read_pos;
2176 struct completion read_comp;
2177 bool discard_retried = false;
2178 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
2180 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
2181 need_sync_io = true;
2183 if (need_sync_io && from_map) {
2184 INIT_WORK(&dio->work, integrity_bio_wait);
2185 queue_work(ic->offload_wq, &dio->work);
2186 return;
2189 lock_retry:
2190 spin_lock_irq(&ic->endio_wait.lock);
2191 retry:
2192 if (unlikely(dm_integrity_failed(ic))) {
2193 spin_unlock_irq(&ic->endio_wait.lock);
2194 do_endio(ic, bio);
2195 return;
2197 dio->range.n_sectors = bio_sectors(bio);
2198 journal_read_pos = NOT_FOUND;
2199 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2200 if (dio->op == REQ_OP_WRITE) {
2201 unsigned int next_entry, i, pos;
2202 unsigned int ws, we, range_sectors;
2204 dio->range.n_sectors = min(dio->range.n_sectors,
2205 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2206 if (unlikely(!dio->range.n_sectors)) {
2207 if (from_map)
2208 goto offload_to_thread;
2209 sleep_on_endio_wait(ic);
2210 goto retry;
2212 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2213 ic->free_sectors -= range_sectors;
2214 journal_section = ic->free_section;
2215 journal_entry = ic->free_section_entry;
2217 next_entry = ic->free_section_entry + range_sectors;
2218 ic->free_section_entry = next_entry % ic->journal_section_entries;
2219 ic->free_section += next_entry / ic->journal_section_entries;
2220 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2221 wraparound_section(ic, &ic->free_section);
2223 pos = journal_section * ic->journal_section_entries + journal_entry;
2224 ws = journal_section;
2225 we = journal_entry;
2226 i = 0;
2227 do {
2228 struct journal_entry *je;
2230 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2231 pos++;
2232 if (unlikely(pos >= ic->journal_entries))
2233 pos = 0;
2235 je = access_journal_entry(ic, ws, we);
2236 BUG_ON(!journal_entry_is_unused(je));
2237 journal_entry_set_inprogress(je);
2238 we++;
2239 if (unlikely(we == ic->journal_section_entries)) {
2240 we = 0;
2241 ws++;
2242 wraparound_section(ic, &ws);
2244 } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2246 spin_unlock_irq(&ic->endio_wait.lock);
2247 goto journal_read_write;
2248 } else {
2249 sector_t next_sector;
2251 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2252 if (likely(journal_read_pos == NOT_FOUND)) {
2253 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2254 dio->range.n_sectors = next_sector - dio->range.logical_sector;
2255 } else {
2256 unsigned int i;
2257 unsigned int jp = journal_read_pos + 1;
2259 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2260 if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2261 break;
2263 dio->range.n_sectors = i;
2267 if (unlikely(!add_new_range(ic, &dio->range, true))) {
2268 /*
2269 * We must not sleep in the request routine because it could
2270 * stall bios on current->bio_list.
2271 * So, we offload the bio to a workqueue if we have to sleep.
2272 */
2273 if (from_map) {
2274 offload_to_thread:
2275 spin_unlock_irq(&ic->endio_wait.lock);
2276 INIT_WORK(&dio->work, integrity_bio_wait);
2277 queue_work(ic->wait_wq, &dio->work);
2278 return;
2279 }
2280 if (journal_read_pos != NOT_FOUND)
2281 dio->range.n_sectors = ic->sectors_per_block;
2282 wait_and_add_new_range(ic, &dio->range);
2283 /*
2284 * wait_and_add_new_range drops the spinlock, so the journal
2285 * may have been changed arbitrarily. We need to recheck.
2286 * To simplify the code, we restrict I/O size to just one block.
2287 */
2288 if (journal_read_pos != NOT_FOUND) {
2289 sector_t next_sector;
2290 unsigned int new_pos;
2292 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2293 if (unlikely(new_pos != journal_read_pos)) {
2294 remove_range_unlocked(ic, &dio->range);
2295 goto retry;
2299 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2300 sector_t next_sector;
2301 unsigned int new_pos;
2303 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2304 if (unlikely(new_pos != NOT_FOUND) ||
2305 unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2306 remove_range_unlocked(ic, &dio->range);
2307 spin_unlock_irq(&ic->endio_wait.lock);
2308 queue_work(ic->commit_wq, &ic->commit_work);
2309 flush_workqueue(ic->commit_wq);
2310 queue_work(ic->writer_wq, &ic->writer_work);
2311 flush_workqueue(ic->writer_wq);
2312 discard_retried = true;
2313 goto lock_retry;
2316 spin_unlock_irq(&ic->endio_wait.lock);
2318 if (unlikely(journal_read_pos != NOT_FOUND)) {
2319 journal_section = journal_read_pos / ic->journal_section_entries;
2320 journal_entry = journal_read_pos % ic->journal_section_entries;
2321 goto journal_read_write;
2324 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2325 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2326 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2327 struct bitmap_block_status *bbs;
2329 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2330 spin_lock(&bbs->bio_queue_lock);
2331 bio_list_add(&bbs->bio_queue, bio);
2332 spin_unlock(&bbs->bio_queue_lock);
2333 queue_work(ic->writer_wq, &bbs->work);
2334 return;
2338 dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2340 if (need_sync_io) {
2341 init_completion(&read_comp);
2342 dio->completion = &read_comp;
2343 } else
2344 dio->completion = NULL;
2346 dm_bio_record(&dio->bio_details, bio);
2347 bio_set_dev(bio, ic->dev->bdev);
2348 bio->bi_integrity = NULL;
2349 bio->bi_opf &= ~REQ_INTEGRITY;
2350 bio->bi_end_io = integrity_end_io;
2351 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2353 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2354 integrity_metadata(&dio->work);
2355 dm_integrity_flush_buffers(ic, false);
2357 dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2358 dio->completion = NULL;
2360 submit_bio_noacct(bio);
2362 return;
2363 }
2365 submit_bio_noacct(bio);
2367 if (need_sync_io) {
2368 wait_for_completion_io(&read_comp);
2369 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2370 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2371 goto skip_check;
2372 if (ic->mode == 'B') {
2373 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2374 dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2375 goto skip_check;
2378 if (likely(!bio->bi_status))
2379 integrity_metadata(&dio->work);
2380 else
2381 skip_check:
2382 dec_in_flight(dio);
2383 } else {
2384 INIT_WORK(&dio->work, integrity_metadata);
2385 queue_work(ic->metadata_wq, &dio->work);
2386 }
2388 return;
2390 journal_read_write:
2391 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2392 goto lock_retry;
2394 do_endio_flush(ic, dio);
2398 static void integrity_bio_wait(struct work_struct *w)
2400 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2402 dm_integrity_map_continue(dio, false);
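/*
 * Skip the unused tail of a partially filled journal section so that a
 * commit always covers whole sections; the skipped entries are charged
 * against free_sectors. The WARN_ON below cross-checks the journal
 * accounting.
 */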
2405 static void pad_uncommitted(struct dm_integrity_c *ic)
2407 if (ic->free_section_entry) {
2408 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2409 ic->free_section_entry = 0;
2410 ic->free_section++;
2411 wraparound_section(ic, &ic->free_section);
2412 ic->n_uncommitted_sections++;
2414 if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2415 (ic->n_uncommitted_sections + ic->n_committed_sections) *
2416 ic->journal_section_entries + ic->free_sectors)) {
2417 DMCRIT("journal_sections %u, journal_section_entries %u, "
2418 "n_uncommitted_sections %u, n_committed_sections %u, "
2419 "journal_section_entries %u, free_sectors %u",
2420 ic->journal_sections, ic->journal_section_entries,
2421 ic->n_uncommitted_sections, ic->n_committed_sections,
2422 ic->journal_section_entries, ic->free_sectors);
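/*
 * Commit work: pad the current section, wait until in-flight copies
 * into the uncommitted sections have finished, stamp every journal
 * sector with the commit id of the current sequence and write the
 * sections out. Queued flush bios are completed only after the journal
 * has hit stable storage.
 */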
2426 static void integrity_commit(struct work_struct *w)
2428 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2429 unsigned int commit_start, commit_sections;
2430 unsigned int i, j, n;
2431 struct bio *flushes;
2433 del_timer(&ic->autocommit_timer);
2435 spin_lock_irq(&ic->endio_wait.lock);
2436 flushes = bio_list_get(&ic->flush_bio_list);
2437 if (unlikely(ic->mode != 'J')) {
2438 spin_unlock_irq(&ic->endio_wait.lock);
2439 dm_integrity_flush_buffers(ic, true);
2440 goto release_flush_bios;
2443 pad_uncommitted(ic);
2444 commit_start = ic->uncommitted_section;
2445 commit_sections = ic->n_uncommitted_sections;
2446 spin_unlock_irq(&ic->endio_wait.lock);
2448 if (!commit_sections)
2449 goto release_flush_bios;
2451 ic->wrote_to_journal = true;
2453 i = commit_start;
2454 for (n = 0; n < commit_sections; n++) {
2455 for (j = 0; j < ic->journal_section_entries; j++) {
2456 struct journal_entry *je;
2458 je = access_journal_entry(ic, i, j);
2459 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2461 for (j = 0; j < ic->journal_section_sectors; j++) {
2462 struct journal_sector *js;
2464 js = access_journal(ic, i, j);
2465 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2466 }
2467 i++;
2468 if (unlikely(i >= ic->journal_sections))
2469 ic->commit_seq = next_commit_seq(ic->commit_seq);
2470 wraparound_section(ic, &i);
2474 write_journal(ic, commit_start, commit_sections);
2476 spin_lock_irq(&ic->endio_wait.lock);
2477 ic->uncommitted_section += commit_sections;
2478 wraparound_section(ic, &ic->uncommitted_section);
2479 ic->n_uncommitted_sections -= commit_sections;
2480 ic->n_committed_sections += commit_sections;
2481 spin_unlock_irq(&ic->endio_wait.lock);
2483 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2484 queue_work(ic->writer_wq, &ic->writer_work);
2486 release_flush_bios:
2487 while (flushes) {
2488 struct bio *next = flushes->bi_next;
2490 flushes->bi_next = NULL;
2491 do_endio(ic, flushes);
2492 flushes = next;
2496 static void complete_copy_from_journal(unsigned long error, void *context)
2498 struct journal_io *io = context;
2499 struct journal_completion *comp = io->comp;
2500 struct dm_integrity_c *ic = comp->ic;
2502 remove_range(ic, &io->range);
2503 mempool_free(io, &ic->journal_io_mempool);
2504 if (unlikely(error != 0))
2505 dm_integrity_io_error(ic, "copying from journal", -EIO);
2506 complete_journal_op(comp);
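/*
 * While data sits in the journal, the last 8 bytes of each sector are
 * displaced by the per-sector commit_id; put the bytes saved in
 * je->last_bytes back before the data is copied to its final location.
 */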
2509 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2510 struct journal_entry *je)
2511 {
2512 unsigned int s = 0;
2514 do {
2515 js->commit_id = je->last_bytes[s];
2516 js++;
2517 } while (++s < ic->sectors_per_block);
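/*
 * Write committed journal sections to their final location. Runs of
 * adjacent entries are merged into a single copy, entries superseded by
 * a newer committed version are dropped, and the tags are written to
 * the metadata area before the data copy is issued. The same routine
 * performs journal replay after a crash (from_replay).
 */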
2520 static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
2521 unsigned int write_sections, bool from_replay)
2523 unsigned int i, j, n;
2524 struct journal_completion comp;
2525 struct blk_plug plug;
2527 blk_start_plug(&plug);
2529 comp.ic = ic;
2530 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2531 init_completion(&comp.comp);
2533 i = write_start;
2534 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2535 #ifndef INTERNAL_VERIFY
2536 if (unlikely(from_replay))
2537 #endif
2538 rw_section_mac(ic, i, false);
2539 for (j = 0; j < ic->journal_section_entries; j++) {
2540 struct journal_entry *je = access_journal_entry(ic, i, j);
2541 sector_t sec, area, offset;
2542 unsigned int k, l, next_loop;
2543 sector_t metadata_block;
2544 unsigned int metadata_offset;
2545 struct journal_io *io;
2547 if (journal_entry_is_unused(je))
2548 continue;
2549 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2550 sec = journal_entry_get_sector(je);
2551 if (unlikely(from_replay)) {
2552 if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
2553 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2554 sec &= ~(sector_t)(ic->sectors_per_block - 1);
2556 if (unlikely(sec >= ic->provided_data_sectors)) {
2557 journal_entry_set_unused(je);
2558 continue;
2561 get_area_and_offset(ic, sec, &area, &offset);
2562 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2563 for (k = j + 1; k < ic->journal_section_entries; k++) {
2564 struct journal_entry *je2 = access_journal_entry(ic, i, k);
2565 sector_t sec2, area2, offset2;
2567 if (journal_entry_is_unused(je2))
2568 break;
2569 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2570 sec2 = journal_entry_get_sector(je2);
2571 if (unlikely(sec2 >= ic->provided_data_sectors))
2572 break;
2573 get_area_and_offset(ic, sec2, &area2, &offset2);
2574 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2575 break;
2576 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2578 next_loop = k - 1;
2580 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2582 io->range.logical_sector = sec;
2583 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2585 spin_lock_irq(&ic->endio_wait.lock);
2586 add_new_range_and_wait(ic, &io->range);
2588 if (likely(!from_replay)) {
2589 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2591 /* don't write if there is newer committed sector */
2592 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2593 struct journal_entry *je2 = access_journal_entry(ic, i, j);
2595 journal_entry_set_unused(je2);
2596 remove_journal_node(ic, &section_node[j]);
2597 j++;
2598 sec += ic->sectors_per_block;
2599 offset += ic->sectors_per_block;
2601 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2602 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2604 journal_entry_set_unused(je2);
2605 remove_journal_node(ic, &section_node[k - 1]);
2606 k--;
2607 }
2608 if (j == k) {
2609 remove_range_unlocked(ic, &io->range);
2610 spin_unlock_irq(&ic->endio_wait.lock);
2611 mempool_free(io, &ic->journal_io_mempool);
2612 goto skip_io;
2613 }
2614 for (l = j; l < k; l++)
2615 remove_journal_node(ic, &section_node[l]);
2617 spin_unlock_irq(&ic->endio_wait.lock);
2619 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2620 for (l = j; l < k; l++) {
2621 int r;
2622 struct journal_entry *je2 = access_journal_entry(ic, i, l);
2624 if (
2625 #ifndef INTERNAL_VERIFY
2626 unlikely(from_replay) &&
2627 #endif
2628 ic->internal_hash) {
2629 char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2631 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2632 (char *)access_journal_data(ic, i, l), test_tag);
2633 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
2634 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2635 dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
2639 journal_entry_set_unused(je2);
2640 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2641 ic->tag_size, TAG_WRITE);
2642 if (unlikely(r))
2643 dm_integrity_io_error(ic, "reading tags", r);
2646 atomic_inc(&comp.in_flight);
2647 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2648 (k - j) << ic->sb->log2_sectors_per_block,
2649 get_data_sector(ic, area, offset),
2650 complete_copy_from_journal, io);
2651 skip_io:
2652 j = next_loop;
2656 dm_bufio_write_dirty_buffers_async(ic->bufio);
2658 blk_finish_plug(&plug);
2660 complete_journal_op(&comp);
2661 wait_for_completion_io(&comp.comp);
2663 dm_integrity_flush_buffers(ic, true);
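/*
 * Writer work: flush committed journal sections to the data device and
 * return the freed entries to free_sectors, waking writers that were
 * blocked on a full journal.
 */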
2666 static void integrity_writer(struct work_struct *w)
2668 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2669 unsigned int write_start, write_sections;
2670 unsigned int prev_free_sectors;
2672 spin_lock_irq(&ic->endio_wait.lock);
2673 write_start = ic->committed_section;
2674 write_sections = ic->n_committed_sections;
2675 spin_unlock_irq(&ic->endio_wait.lock);
2677 if (!write_sections)
2678 return;
2680 do_journal_write(ic, write_start, write_sections, false);
2682 spin_lock_irq(&ic->endio_wait.lock);
2684 ic->committed_section += write_sections;
2685 wraparound_section(ic, &ic->committed_section);
2686 ic->n_committed_sections -= write_sections;
2688 prev_free_sectors = ic->free_sectors;
2689 ic->free_sectors += write_sections * ic->journal_section_entries;
2690 if (unlikely(!prev_free_sectors))
2691 wake_up_locked(&ic->endio_wait);
2693 spin_unlock_irq(&ic->endio_wait.lock);
2696 static void recalc_write_super(struct dm_integrity_c *ic)
2698 int r;
2700 dm_integrity_flush_buffers(ic, false);
2701 if (dm_integrity_failed(ic))
2702 return;
2704 r = sync_rw_sb(ic, REQ_OP_WRITE);
2705 if (unlikely(r))
2706 dm_integrity_io_error(ic, "writing superblock", r);
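/*
 * Background tag recalculation. The device is processed in chunks of up
 * to RECALC_SECTORS: read the data, compute the checksums, write the
 * tag area, and persist sb->recalc_sector every RECALC_WRITE_SUPER
 * chunks so progress survives a crash. On allocation failure the buffer
 * is halved (on 64-bit, e.g. 32768 -> 16384 -> ... sectors) until it
 * reaches one block.
 */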
2709 static void integrity_recalc(struct work_struct *w)
2711 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2712 size_t recalc_tags_size;
2713 u8 *recalc_buffer = NULL;
2714 u8 *recalc_tags = NULL;
2715 struct dm_integrity_range range;
2716 struct dm_io_request io_req;
2717 struct dm_io_region io_loc;
2718 sector_t area, offset;
2719 sector_t metadata_block;
2720 unsigned int metadata_offset;
2721 sector_t logical_sector, n_sectors;
2722 __u8 *t;
2723 unsigned int i;
2724 int r;
2725 unsigned int super_counter = 0;
2726 unsigned recalc_sectors = RECALC_SECTORS;
2728 retry:
2729 recalc_buffer = __vmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO);
2730 if (!recalc_buffer) {
2731 oom:
2732 recalc_sectors >>= 1;
2733 if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
2734 goto retry;
2735 DMCRIT("out of memory for recalculate buffer - recalculation disabled");
2736 goto free_ret;
2738 recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
2739 if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
2740 recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
2741 recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
2742 if (!recalc_tags) {
2743 vfree(recalc_buffer);
2744 recalc_buffer = NULL;
2745 goto oom;
2748 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2750 spin_lock_irq(&ic->endio_wait.lock);
2751 next_chunk:
2754 if (unlikely(dm_post_suspending(ic->ti)))
2755 goto unlock_ret;
2757 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2758 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2759 if (ic->mode == 'B') {
2760 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2761 DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2762 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2763 }
2764 goto unlock_ret;
2767 get_area_and_offset(ic, range.logical_sector, &area, &offset);
2768 range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
2769 if (!ic->meta_dev)
2770 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
2772 add_new_range_and_wait(ic, &range);
2773 spin_unlock_irq(&ic->endio_wait.lock);
2774 logical_sector = range.logical_sector;
2775 n_sectors = range.n_sectors;
2777 if (ic->mode == 'B') {
2778 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2779 goto advance_and_next;
2781 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2782 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2783 logical_sector += ic->sectors_per_block;
2784 n_sectors -= ic->sectors_per_block;
2785 cond_resched();
2787 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2788 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2789 n_sectors -= ic->sectors_per_block;
2790 cond_resched();
2792 get_area_and_offset(ic, logical_sector, &area, &offset);
2795 DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2797 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2798 recalc_write_super(ic);
2799 if (ic->mode == 'B')
2800 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2805 if (unlikely(dm_integrity_failed(ic)))
2806 goto err;
2808 io_req.bi_opf = REQ_OP_READ;
2809 io_req.mem.type = DM_IO_VMA;
2810 io_req.mem.ptr.addr = recalc_buffer;
2811 io_req.notify.fn = NULL;
2812 io_req.client = ic->io;
2813 io_loc.bdev = ic->dev->bdev;
2814 io_loc.sector = get_data_sector(ic, area, offset);
2815 io_loc.count = n_sectors;
2817 r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
2818 if (unlikely(r)) {
2819 dm_integrity_io_error(ic, "reading data", r);
2820 goto err;
2821 }
2823 t = recalc_tags;
2824 for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2825 integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
2826 t += ic->tag_size;
2829 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2831 r = dm_integrity_rw_tag(ic, recalc_tags, &metadata_block, &metadata_offset, t - recalc_tags, TAG_WRITE);
2832 if (unlikely(r)) {
2833 dm_integrity_io_error(ic, "writing tags", r);
2834 goto err;
2837 if (ic->mode == 'B') {
2838 sector_t start, end;
2840 start = (range.logical_sector >>
2841 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2842 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2843 end = ((range.logical_sector + range.n_sectors) >>
2844 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2845 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2846 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2849 advance_and_next:
2850 cond_resched();
2852 spin_lock_irq(&ic->endio_wait.lock);
2853 remove_range_unlocked(ic, &range);
2854 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2855 goto next_chunk;
2857 err:
2858 remove_range(ic, &range);
2859 goto free_ret;
2861 unlock_ret:
2862 spin_unlock_irq(&ic->endio_wait.lock);
2864 recalc_write_super(ic);
2866 free_ret:
2867 vfree(recalc_buffer);
2868 kvfree(recalc_tags);
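/*
 * Bitmap mode: bios whose range is not yet covered by may_write_bitmap
 * are queued on their bitmap block. Set the corresponding bits in the
 * on-disk bitmap, write that block with FUA, and only then let the
 * waiting bios proceed.
 */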
2871 static void bitmap_block_work(struct work_struct *w)
2873 struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2874 struct dm_integrity_c *ic = bbs->ic;
2875 struct bio *bio;
2876 struct bio_list bio_queue;
2877 struct bio_list waiting;
2879 bio_list_init(&waiting);
2881 spin_lock(&bbs->bio_queue_lock);
2882 bio_queue = bbs->bio_queue;
2883 bio_list_init(&bbs->bio_queue);
2884 spin_unlock(&bbs->bio_queue_lock);
2886 while ((bio = bio_list_pop(&bio_queue))) {
2887 struct dm_integrity_io *dio;
2889 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2891 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2892 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2893 remove_range(ic, &dio->range);
2894 INIT_WORK(&dio->work, integrity_bio_wait);
2895 queue_work(ic->offload_wq, &dio->work);
2896 } else {
2897 block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2898 dio->range.n_sectors, BITMAP_OP_SET);
2899 bio_list_add(&waiting, bio);
2903 if (bio_list_empty(&waiting))
2904 return;
2906 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC,
2907 bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2908 BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2910 while ((bio = bio_list_pop(&waiting))) {
2911 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2913 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2914 dio->range.n_sectors, BITMAP_OP_SET);
2916 remove_range(ic, &dio->range);
2917 INIT_WORK(&dio->work, integrity_bio_wait);
2918 queue_work(ic->offload_wq, &dio->work);
2921 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2924 static void bitmap_flush_work(struct work_struct *work)
2926 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2927 struct dm_integrity_range range;
2928 unsigned long limit;
2929 struct bio *bio;
2931 dm_integrity_flush_buffers(ic, false);
2933 range.logical_sector = 0;
2934 range.n_sectors = ic->provided_data_sectors;
2936 spin_lock_irq(&ic->endio_wait.lock);
2937 add_new_range_and_wait(ic, &range);
2938 spin_unlock_irq(&ic->endio_wait.lock);
2940 dm_integrity_flush_buffers(ic, true);
2942 limit = ic->provided_data_sectors;
2943 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2944 limit = le64_to_cpu(ic->sb->recalc_sector)
2945 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2946 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2948 /*DEBUG_print("zeroing journal\n");*/
2949 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2950 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2952 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
2953 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2955 spin_lock_irq(&ic->endio_wait.lock);
2956 remove_range_unlocked(ic, &range);
2957 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2958 bio_endio(bio);
2959 spin_unlock_irq(&ic->endio_wait.lock);
2960 spin_lock_irq(&ic->endio_wait.lock);
2962 spin_unlock_irq(&ic->endio_wait.lock);
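/*
 * Format (a part of) the journal: clear the payload of every journal
 * sector, stamp it with the commit id for commit_seq, mark all entries
 * unused and write the sections out.
 */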
2966 static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
2967 unsigned int n_sections, unsigned char commit_seq)
2969 unsigned int i, j, n;
2974 for (n = 0; n < n_sections; n++) {
2975 i = start_section + n;
2976 wraparound_section(ic, &i);
2977 for (j = 0; j < ic->journal_section_sectors; j++) {
2978 struct journal_sector *js = access_journal(ic, i, j);
2980 BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
2981 memset(&js->sectors, 0, sizeof(js->sectors));
2982 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2984 for (j = 0; j < ic->journal_section_entries; j++) {
2985 struct journal_entry *je = access_journal_entry(ic, i, j);
2987 journal_entry_set_unused(je);
2991 write_journal(ic, start_section, n_sections);
2994 static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
2996 unsigned char k;
2998 for (k = 0; k < N_COMMIT_IDS; k++) {
2999 if (dm_integrity_commit_id(ic, i, j, k) == id)
3000 return k;
3002 dm_integrity_io_error(ic, "journal commit id", -EIO);
3003 return -1;
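/*
 * Crash recovery: read (and, if configured, decrypt) the whole journal,
 * use the per-sector commit ids to locate the end of the last complete
 * commit, replay the consistent part with do_journal_write() and erase
 * the rest.
 */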
3006 static void replay_journal(struct dm_integrity_c *ic)
3008 unsigned int i, j;
3009 bool used_commit_ids[N_COMMIT_IDS];
3010 unsigned int max_commit_id_sections[N_COMMIT_IDS];
3011 unsigned int write_start, write_sections;
3012 unsigned int continue_section;
3013 bool journal_empty;
3014 unsigned char unused, last_used, want_commit_seq;
3016 if (ic->mode == 'R')
3017 goto clear_journal;
3019 if (ic->journal_uptodate)
3020 return;
3025 if (!ic->just_formatted) {
3026 DEBUG_print("reading journal\n");
3027 rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);
3029 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
3030 if (ic->journal_io) {
3031 struct journal_completion crypt_comp;
3034 init_completion(&crypt_comp.comp);
3035 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
3036 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
3037 wait_for_completion(&crypt_comp.comp);
3039 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
3042 if (dm_integrity_failed(ic))
3043 goto clear_journal;
3045 journal_empty = true;
3046 memset(used_commit_ids, 0, sizeof(used_commit_ids));
3047 memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections));
3048 for (i = 0; i < ic->journal_sections; i++) {
3049 for (j = 0; j < ic->journal_section_sectors; j++) {
3050 int k;
3051 struct journal_sector *js = access_journal(ic, i, j);
3053 k = find_commit_seq(ic, i, j, js->commit_id);
3054 if (k < 0)
3055 goto clear_journal;
3056 used_commit_ids[k] = true;
3057 max_commit_id_sections[k] = i;
3059 if (journal_empty) {
3060 for (j = 0; j < ic->journal_section_entries; j++) {
3061 struct journal_entry *je = access_journal_entry(ic, i, j);
3063 if (!journal_entry_is_unused(je)) {
3064 journal_empty = false;
3065 break;
3071 if (!used_commit_ids[N_COMMIT_IDS - 1]) {
3072 unused = N_COMMIT_IDS - 1;
3073 while (unused && !used_commit_ids[unused - 1])
3074 unused--;
3075 } else {
3076 for (unused = 0; unused < N_COMMIT_IDS; unused++)
3077 if (!used_commit_ids[unused])
3078 break;
3079 if (unused == N_COMMIT_IDS) {
3080 dm_integrity_io_error(ic, "journal commit ids", -EIO);
3081 goto clear_journal;
3084 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
3085 unused, used_commit_ids[0], used_commit_ids[1],
3086 used_commit_ids[2], used_commit_ids[3]);
3088 last_used = prev_commit_seq(unused);
3089 want_commit_seq = prev_commit_seq(last_used);
3091 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
3092 journal_empty = true;
3094 write_start = max_commit_id_sections[last_used] + 1;
3095 if (unlikely(write_start >= ic->journal_sections))
3096 want_commit_seq = next_commit_seq(want_commit_seq);
3097 wraparound_section(ic, &write_start);
3099 i = write_start;
3100 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
3101 for (j = 0; j < ic->journal_section_sectors; j++) {
3102 struct journal_sector *js = access_journal(ic, i, j);
3104 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
3105 /*
3106 * This could be caused by crash during writing.
3107 * We won't replay the inconsistent part of the
3108 * journal.
3109 */
3110 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
3111 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
3112 goto brk;
3115 i++;
3116 if (unlikely(i >= ic->journal_sections))
3117 want_commit_seq = next_commit_seq(want_commit_seq);
3118 wraparound_section(ic, &i);
3120 brk:
3122 if (!journal_empty) {
3123 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
3124 write_sections, write_start, want_commit_seq);
3125 do_journal_write(ic, write_start, write_sections, true);
3128 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
3129 continue_section = write_start;
3130 ic->commit_seq = want_commit_seq;
3131 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
3134 unsigned char erase_seq;
3136 clear_journal:
3137 DEBUG_print("clearing journal\n");
3139 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
3140 s = write_start;
3141 init_journal(ic, s, 1, erase_seq);
3142 s++;
3143 wraparound_section(ic, &s);
3144 if (ic->journal_sections >= 2) {
3145 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
3146 s += ic->journal_sections - 2;
3147 wraparound_section(ic, &s);
3148 init_journal(ic, s, 1, erase_seq);
3151 continue_section = 0;
3152 ic->commit_seq = next_commit_seq(erase_seq);
3155 ic->committed_section = continue_section;
3156 ic->n_committed_sections = 0;
3158 ic->uncommitted_section = continue_section;
3159 ic->n_uncommitted_sections = 0;
3161 ic->free_section = continue_section;
3162 ic->free_section_entry = 0;
3163 ic->free_sectors = ic->journal_entries;
3165 ic->journal_tree_root = RB_ROOT;
3166 for (i = 0; i < ic->journal_entries; i++)
3167 init_journal_node(&ic->journal_tree[i]);
3170 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
3172 DEBUG_print("%s\n", __func__);
3174 if (ic->mode == 'B') {
3175 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
3176 ic->synchronous_mode = 1;
3178 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3179 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
3180 flush_workqueue(ic->commit_wq);
3184 static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
3186 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
3188 DEBUG_print("%s\n", __func__);
3190 dm_integrity_enter_synchronous_mode(ic);
3192 return NOTIFY_DONE;
3195 static void dm_integrity_postsuspend(struct dm_target *ti)
3197 struct dm_integrity_c *ic = ti->private;
3198 int r;
3200 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
3202 del_timer_sync(&ic->autocommit_timer);
3204 if (ic->recalc_wq)
3205 drain_workqueue(ic->recalc_wq);
3207 if (ic->mode == 'B')
3208 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3210 queue_work(ic->commit_wq, &ic->commit_work);
3211 drain_workqueue(ic->commit_wq);
3213 if (ic->mode == 'J') {
3214 queue_work(ic->writer_wq, &ic->writer_work);
3215 drain_workqueue(ic->writer_wq);
3216 dm_integrity_flush_buffers(ic, true);
3217 if (ic->wrote_to_journal) {
3218 init_journal(ic, ic->free_section,
3219 ic->journal_sections - ic->free_section, ic->commit_seq);
3220 if (ic->free_section) {
3221 init_journal(ic, 0, ic->free_section,
3222 next_commit_seq(ic->commit_seq));
3227 if (ic->mode == 'B') {
3228 dm_integrity_flush_buffers(ic, true);
3229 #if 1
3230 /* set to 0 to test bitmap replay code */
3231 init_journal(ic, 0, ic->journal_sections, 0);
3232 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3233 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3234 if (unlikely(r))
3235 dm_integrity_io_error(ic, "writing superblock", r);
3236 #endif
3239 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3241 ic->journal_uptodate = true;
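/*
 * Resume: propagate a device size change to the superblock, turn a
 * dirty bitmap left by an unclean shutdown into a recalculation
 * request, restart an interrupted recalculation and register the reboot
 * notifier.
 */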
3244 static void dm_integrity_resume(struct dm_target *ti)
3246 struct dm_integrity_c *ic = ti->private;
3247 __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3248 int r;
3250 DEBUG_print("resume\n");
3252 ic->wrote_to_journal = false;
3254 if (ic->provided_data_sectors != old_provided_data_sectors) {
3255 if (ic->provided_data_sectors > old_provided_data_sectors &&
3256 ic->mode == 'B' &&
3257 ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3258 rw_journal_sectors(ic, REQ_OP_READ, 0,
3259 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3260 block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3261 ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3262 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3263 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3266 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3267 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3268 if (unlikely(r))
3269 dm_integrity_io_error(ic, "writing superblock", r);
3272 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3273 DEBUG_print("resume dirty_bitmap\n");
3274 rw_journal_sectors(ic, REQ_OP_READ, 0,
3275 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3276 if (ic->mode == 'B') {
3277 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3278 !ic->reset_recalculate_flag) {
3279 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3280 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3281 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3282 BITMAP_OP_TEST_ALL_CLEAR)) {
3283 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3284 ic->sb->recalc_sector = cpu_to_le64(0);
3285 }
3286 } else {
3287 DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
3288 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3289 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3290 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3291 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3292 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3293 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3294 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3295 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3296 ic->sb->recalc_sector = cpu_to_le64(0);
3299 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3300 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
3301 ic->reset_recalculate_flag) {
3302 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3303 ic->sb->recalc_sector = cpu_to_le64(0);
3305 init_journal(ic, 0, ic->journal_sections, 0);
3307 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3309 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3310 if (unlikely(r))
3311 dm_integrity_io_error(ic, "writing superblock", r);
3314 if (ic->reset_recalculate_flag) {
3315 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3316 ic->sb->recalc_sector = cpu_to_le64(0);
3318 if (ic->mode == 'B') {
3319 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3320 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3321 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3322 if (unlikely(r))
3323 dm_integrity_io_error(ic, "writing superblock", r);
3325 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3326 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3327 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3328 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3329 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3330 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3331 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3332 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3333 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3334 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3335 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3337 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3338 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3342 DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3343 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3344 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3346 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3347 if (recalc_pos < ic->provided_data_sectors) {
3348 queue_work(ic->recalc_wq, &ic->recalc_work);
3349 } else if (recalc_pos > ic->provided_data_sectors) {
3350 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3351 recalc_write_super(ic);
3355 ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3356 ic->reboot_notifier.next = NULL;
3357 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
3358 WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3360 #if 0
3361 /* set to 1 to stress test synchronous mode */
3362 dm_integrity_enter_synchronous_mode(ic);
3363 #endif
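/*
 * STATUSTYPE_INFO emits "<mismatches> <provided_data_sectors>
 * <recalc position or '-'>", so a hypothetical 'dmsetup status' line
 * might read "0 2097152 integrity 0 2097152 -"; STATUSTYPE_TABLE
 * reconstructs the constructor arguments.
 */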
3366 static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3367 unsigned int status_flags, char *result, unsigned int maxlen)
3369 struct dm_integrity_c *ic = ti->private;
3370 unsigned int arg_count;
3374 case STATUSTYPE_INFO:
3375 DMEMIT("%llu %llu",
3376 (unsigned long long)atomic64_read(&ic->number_of_mismatches),
3377 ic->provided_data_sectors);
3378 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3379 DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3380 else
3381 DMEMIT(" -");
3382 break;
3384 case STATUSTYPE_TABLE: {
3385 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3387 watermark_percentage += ic->journal_entries / 2;
3388 do_div(watermark_percentage, ic->journal_entries);
3389 arg_count = 3;
3390 arg_count += !!ic->meta_dev;
3391 arg_count += ic->sectors_per_block != 1;
3392 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3393 arg_count += ic->reset_recalculate_flag;
3394 arg_count += ic->discard;
3395 arg_count += ic->mode == 'J';
3396 arg_count += ic->mode == 'J';
3397 arg_count += ic->mode == 'B';
3398 arg_count += ic->mode == 'B';
3399 arg_count += !!ic->internal_hash_alg.alg_string;
3400 arg_count += !!ic->journal_crypt_alg.alg_string;
3401 arg_count += !!ic->journal_mac_alg.alg_string;
3402 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3403 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
3404 arg_count += ic->legacy_recalculate;
3405 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3406 ic->tag_size, ic->mode, arg_count);
3407 if (ic->meta_dev)
3408 DMEMIT(" meta_device:%s", ic->meta_dev->name);
3409 if (ic->sectors_per_block != 1)
3410 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3411 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3412 DMEMIT(" recalculate");
3413 if (ic->reset_recalculate_flag)
3414 DMEMIT(" reset_recalculate");
3415 if (ic->discard)
3416 DMEMIT(" allow_discards");
3417 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3418 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3419 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3420 if (ic->mode == 'J') {
3421 DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
3422 DMEMIT(" commit_time:%u", ic->autocommit_msec);
3424 if (ic->mode == 'B') {
3425 DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3426 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3428 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3429 DMEMIT(" fix_padding");
3430 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
3431 DMEMIT(" fix_hmac");
3432 if (ic->legacy_recalculate)
3433 DMEMIT(" legacy_recalculate");
3435 #define EMIT_ALG(a, n) \
3437 if (ic->a.alg_string) { \
3438 DMEMIT(" %s:%s", n, ic->a.alg_string); \
3439 if (ic->a.key_string) \
3440 DMEMIT(":%s", ic->a.key_string);\
3443 EMIT_ALG(internal_hash_alg, "internal_hash");
3444 EMIT_ALG(journal_crypt_alg, "journal_crypt");
3445 EMIT_ALG(journal_mac_alg, "journal_mac");
3448 case STATUSTYPE_IMA:
3449 DMEMIT_TARGET_NAME_VERSION(ti->type);
3450 DMEMIT(",dev_name=%s,start=%llu,tag_size=%u,mode=%c",
3451 ic->dev->name, ic->start, ic->tag_size, ic->mode);
3453 if (ic->meta_dev)
3454 DMEMIT(",meta_device=%s", ic->meta_dev->name);
3455 if (ic->sectors_per_block != 1)
3456 DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT);
3458 DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ?
3459 'y' : 'n');
3460 DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n');
3461 DMEMIT(",fix_padding=%c",
3462 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n');
3463 DMEMIT(",fix_hmac=%c",
3464 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n');
3465 DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n');
3467 DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS);
3468 DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors);
3469 DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors);
3470 DMEMIT(";");
3471 break;
3475 static int dm_integrity_iterate_devices(struct dm_target *ti,
3476 iterate_devices_callout_fn fn, void *data)
3478 struct dm_integrity_c *ic = ti->private;
3480 if (!ic->meta_dev)
3481 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3482 else
3483 return fn(ti, ic->dev, 0, ti->len, data);
3486 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3488 struct dm_integrity_c *ic = ti->private;
3490 if (ic->sectors_per_block > 1) {
3491 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3492 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3493 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3494 limits->dma_alignment = limits->logical_block_size - 1;
3496 limits->max_integrity_segments = USHRT_MAX;
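/*
 * Journal geometry. An illustrative example, assuming 512-byte blocks,
 * tag_size 4 and no journal MAC: journal_entry_size =
 * roundup(16 + 4, 8) = 24 bytes, journal_entries_per_sector =
 * 504 / 24 = 21, journal_section_entries = 21 * 8 = 168, so one section
 * occupies 168 data sectors plus 8 entry sectors = 176 journal sectors.
 */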
3499 static void calculate_journal_section_size(struct dm_integrity_c *ic)
3501 unsigned int sector_space = JOURNAL_SECTOR_DATA;
3503 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3504 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3505 JOURNAL_ENTRY_ROUNDUP);
3507 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3508 sector_space -= JOURNAL_MAC_PER_SECTOR;
3509 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3510 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3511 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3512 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3515 static int calculate_device_limits(struct dm_integrity_c *ic)
3517 __u64 initial_sectors;
3519 calculate_journal_section_size(ic);
3520 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3521 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3522 return -EINVAL;
3523 ic->initial_sectors = initial_sectors;
3525 if (!ic->meta_dev) {
3526 sector_t last_sector, last_area, last_offset;
3528 /* we have to maintain excessive padding for compatibility with existing volumes */
3529 __u64 metadata_run_padding =
3530 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3531 (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3532 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3534 ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3535 metadata_run_padding) >> SECTOR_SHIFT;
3536 if (!(ic->metadata_run & (ic->metadata_run - 1)))
3537 ic->log2_metadata_run = __ffs(ic->metadata_run);
3538 else
3539 ic->log2_metadata_run = -1;
3541 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3542 last_sector = get_data_sector(ic, last_area, last_offset);
3543 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3544 return -EINVAL;
3545 } else {
3546 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3548 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3549 >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3550 meta_size <<= ic->log2_buffer_sectors;
3551 if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3552 ic->initial_sectors + meta_size > ic->meta_device_sectors)
3553 return -EINVAL;
3554 ic->metadata_run = 1;
3555 ic->log2_metadata_run = 0;
3556 }
3558 return 0;
3561 static void get_provided_data_sectors(struct dm_integrity_c *ic)
3563 if (!ic->meta_dev) {
3566 ic->provided_data_sectors = 0;
3567 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3568 __u64 prev_data_sectors = ic->provided_data_sectors;
3570 ic->provided_data_sectors |= (sector_t)1 << test_bit;
3571 if (calculate_device_limits(ic))
3572 ic->provided_data_sectors = prev_data_sectors;
3573 }
3574 } else {
3575 ic->provided_data_sectors = ic->data_device_sectors;
3576 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3580 static int initialize_superblock(struct dm_integrity_c *ic,
3581 unsigned int journal_sectors, unsigned int interleave_sectors)
3583 unsigned int journal_sections;
3586 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3587 memcpy(ic->sb->magic, SB_MAGIC, 8);
3588 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3589 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3590 if (ic->journal_mac_alg.alg_string)
3591 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3593 calculate_journal_section_size(ic);
3594 journal_sections = journal_sectors / ic->journal_section_sectors;
3595 if (!journal_sections)
3596 journal_sections = 1;
3598 if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
3599 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
3600 get_random_bytes(ic->sb->salt, SALT_SIZE);
3603 if (!ic->meta_dev) {
3604 if (ic->fix_padding)
3605 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3606 ic->sb->journal_sections = cpu_to_le32(journal_sections);
3607 if (!interleave_sectors)
3608 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3609 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3610 ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3611 ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3613 get_provided_data_sectors(ic);
3614 if (!ic->provided_data_sectors)
3615 return -EINVAL;
3616 } else {
3617 ic->sb->log2_interleave_sectors = 0;
3619 get_provided_data_sectors(ic);
3620 if (!ic->provided_data_sectors)
3621 return -EINVAL;
3623 try_smaller_buffer:
3624 ic->sb->journal_sections = cpu_to_le32(0);
3625 for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3626 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3627 __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3629 if (test_journal_sections > journal_sections)
3631 ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3632 if (calculate_device_limits(ic))
3633 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3636 if (!le32_to_cpu(ic->sb->journal_sections)) {
3637 if (ic->log2_buffer_sectors > 3) {
3638 ic->log2_buffer_sectors--;
3639 goto try_smaller_buffer;
3640 }
3641 return -EINVAL;
3645 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3647 sb_set_version(ic);
3649 return 0;
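/*
 * Advertise the tag space to the block layer: one integrity tuple of
 * tag_size bytes per ic->sectors_per_block sectors, so that stacked
 * targets and bio-integrity aware callers can attach their own tags.
 */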
3652 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3654 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3655 struct blk_integrity bi;
3657 memset(&bi, 0, sizeof(bi));
3658 bi.profile = &dm_integrity_profile;
3659 bi.tuple_size = ic->tag_size;
3660 bi.tag_size = bi.tuple_size;
3661 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3663 blk_integrity_register(disk, &bi);
3666 static void dm_integrity_free_page_list(struct page_list *pl)
3668 unsigned int i;
3670 if (!pl)
3671 return;
3672 for (i = 0; pl[i].page; i++)
3673 __free_page(pl[i].page);
3674 kvfree(pl);
3677 static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages)
3679 struct page_list *pl;
3680 unsigned int i;
3682 pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3683 if (!pl)
3684 return NULL;
3686 for (i = 0; i < n_pages; i++) {
3687 pl[i].page = alloc_page(GFP_KERNEL);
3688 if (!pl[i].page) {
3689 dm_integrity_free_page_list(pl);
3690 return NULL;
3691 }
3692 if (i)
3693 pl[i - 1].next = &pl[i];
3694 }
3695 pl[i].page = NULL;
3696 pl[i].next = NULL;
3698 return pl;
3701 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3703 unsigned int i;
3705 for (i = 0; i < ic->journal_sections; i++)
3706 kvfree(sl[i]);
3707 kvfree(sl);
3710 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3711 struct page_list *pl)
3713 struct scatterlist **sl;
3716 sl = kvmalloc_array(ic->journal_sections,
3717 sizeof(struct scatterlist *),
3718 GFP_KERNEL | __GFP_ZERO);
3719 if (!sl)
3720 return NULL;
3722 for (i = 0; i < ic->journal_sections; i++) {
3723 struct scatterlist *s;
3724 unsigned int start_index, start_offset;
3725 unsigned int end_index, end_offset;
3726 unsigned int n_pages;
3727 unsigned int idx;
3729 page_list_location(ic, i, 0, &start_index, &start_offset);
3730 page_list_location(ic, i, ic->journal_section_sectors - 1,
3731 &end_index, &end_offset);
3733 n_pages = (end_index - start_index + 1);
3735 s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3736 GFP_KERNEL);
3737 if (!s) {
3738 dm_integrity_free_journal_scatterlist(ic, sl);
3739 return NULL;
3740 }
3742 sg_init_table(s, n_pages);
3743 for (idx = start_index; idx <= end_index; idx++) {
3744 char *va = lowmem_page_address(pl[idx].page);
3745 unsigned int start = 0, end = PAGE_SIZE;
3747 if (idx == start_index)
3748 start = start_offset;
3749 if (idx == end_index)
3750 end = end_offset + (1 << SECTOR_SHIFT);
3751 sg_set_buf(&s[idx - start_index], va + start, end - start);
3752 }
3754 sl[i] = s;
3757 return sl;
3760 static void free_alg(struct alg_spec *a)
3762 kfree_sensitive(a->alg_string);
3763 kfree_sensitive(a->key);
3764 memset(a, 0, sizeof(*a));
3767 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3773 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3774 if (!a->alg_string)
3775 goto nomem;
3777 k = strchr(a->alg_string, ':');
3778 if (k) {
3779 *k = 0;
3780 a->key_string = k + 1;
3781 if (strlen(a->key_string) & 1)
3782 goto inval;
3784 a->key_size = strlen(a->key_string) / 2;
3785 a->key = kmalloc(a->key_size, GFP_KERNEL);
3786 if (!a->key)
3787 goto nomem;
3788 if (hex2bin(a->key, a->key_string, a->key_size))
3789 goto inval;
3791 return 0;
3793 inval:
3794 *error = error_inval;
3795 return -EINVAL;
3796 nomem:
3797 *error = "Out of memory for an argument";
3798 return -ENOMEM;
3801 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3802 char *error_alg, char *error_key)
3804 int r;
3806 if (a->alg_string) {
3807 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3808 if (IS_ERR(*hash)) {
3809 *error = error_alg;
3810 r = PTR_ERR(*hash);
3811 *hash = NULL;
3812 return r;
3815 if (a->key) {
3816 r = crypto_shash_setkey(*hash, a->key, a->key_size);
3817 if (r) {
3818 *error = error_key;
3819 return r;
3821 } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3822 *error = error_key;
3823 return -ENOKEY;
3827 return 0;
3830 static int create_journal(struct dm_integrity_c *ic, char **error)
3832 int r = 0;
3833 unsigned int i;
3834 __u64 journal_pages, journal_desc_size, journal_tree_size;
3835 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3836 struct skcipher_request *req = NULL;
3838 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3839 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3840 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3841 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3843 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3844 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3845 journal_desc_size = journal_pages * sizeof(struct page_list);
3846 if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3847 *error = "Journal doesn't fit into memory";
3848 r = -ENOMEM;
3849 goto bad;
3851 ic->journal_pages = journal_pages;
3853 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3854 if (!ic->journal) {
3855 *error = "Could not allocate memory for journal";
3859 if (ic->journal_crypt_alg.alg_string) {
3860 unsigned int ivsize, blocksize;
3861 struct journal_completion comp;
3864 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3865 if (IS_ERR(ic->journal_crypt)) {
3866 *error = "Invalid journal cipher";
3867 r = PTR_ERR(ic->journal_crypt);
3868 ic->journal_crypt = NULL;
3871 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3872 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3874 if (ic->journal_crypt_alg.key) {
3875 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3876 ic->journal_crypt_alg.key_size);
3877 if (r) {
3878 *error = "Error setting encryption key";
3882 DEBUG_print("cipher %s, block size %u iv size %u\n",
3883 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3885 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3886 if (!ic->journal_io) {
3887 *error = "Could not allocate memory for journal io";
3892 if (blocksize == 1) {
3893 struct scatterlist *sg;
3895 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3896 if (!req) {
3897 *error = "Could not allocate crypt request";
3902 crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3903 if (!crypt_iv) {
3904 *error = "Could not allocate iv";
3909 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3910 if (!ic->journal_xor) {
3911 *error = "Could not allocate memory for journal xor";
3916 sg = kvmalloc_array(ic->journal_pages + 1,
3917 sizeof(struct scatterlist),
3918 GFP_KERNEL);
3919 if (!sg) {
3920 *error = "Unable to allocate sg list";
3924 sg_init_table(sg, ic->journal_pages + 1);
3925 for (i = 0; i < ic->journal_pages; i++) {
3926 char *va = lowmem_page_address(ic->journal_xor[i].page);
3928 clear_page(va);
3929 sg_set_buf(&sg[i], va, PAGE_SIZE);
3931 sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids));
3933 skcipher_request_set_crypt(req, sg, sg,
3934 PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv);
3935 init_completion(&comp.comp);
3936 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3937 if (do_crypt(true, req, &comp))
3938 wait_for_completion(&comp.comp);
3940 r = dm_integrity_failed(ic);
3941 if (unlikely(r)) {
3942 *error = "Unable to encrypt journal";
3945 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3947 crypto_free_skcipher(ic->journal_crypt);
3948 ic->journal_crypt = NULL;
3949 } else {
3950 unsigned int crypt_len = roundup(ivsize, blocksize);
3952 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3953 if (!req) {
3954 *error = "Could not allocate crypt request";
3959 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3960 if (!crypt_iv) {
3961 *error = "Could not allocate iv";
3966 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3967 if (!crypt_data) {
3968 *error = "Unable to allocate crypt data";
3973 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3974 if (!ic->journal_scatterlist) {
3975 *error = "Unable to allocate sg list";
3979 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3980 if (!ic->journal_io_scatterlist) {
3981 *error = "Unable to allocate sg list";
3985 ic->sk_requests = kvmalloc_array(ic->journal_sections,
3986 sizeof(struct skcipher_request *),
3987 GFP_KERNEL | __GFP_ZERO);
3988 if (!ic->sk_requests) {
3989 *error = "Unable to allocate sk requests";
3993 for (i = 0; i < ic->journal_sections; i++) {
3994 struct scatterlist sg;
3995 struct skcipher_request *section_req;
3996 __le32 section_le = cpu_to_le32(i);
3998 memset(crypt_iv, 0x00, ivsize);
3999 memset(crypt_data, 0x00, crypt_len);
4000 memcpy(crypt_data, &section_le, min_t(size_t, crypt_len, sizeof(section_le)));
4002 sg_init_one(&sg, crypt_data, crypt_len);
4003 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
4004 init_completion(&comp.comp);
4005 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
4006 if (do_crypt(true, req, &comp))
4007 wait_for_completion(&comp.comp);
4009 r = dm_integrity_failed(ic);
4010 if (unlikely(r)) {
4011 *error = "Unable to generate iv";
4015 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
4016 if (!section_req) {
4017 *error = "Unable to allocate crypt request";
4021 section_req->iv = kmalloc_array(ivsize, 2,
4022 GFP_KERNEL);
4023 if (!section_req->iv) {
4024 skcipher_request_free(section_req);
4025 *error = "Unable to allocate iv";
4029 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
4030 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
4031 ic->sk_requests[i] = section_req;
4032 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
4037 for (i = 0; i < N_COMMIT_IDS; i++) {
4038 unsigned int j;
4040 retest_commit_id:
4041 for (j = 0; j < i; j++) {
4042 if (ic->commit_ids[j] == ic->commit_ids[i]) {
4043 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
4044 goto retest_commit_id;
4047 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
4050 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
4051 if (journal_tree_size > ULONG_MAX) {
4052 *error = "Journal doesn't fit into memory";
4053 r = -ENOMEM;
4054 goto bad;
4056 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
4057 if (!ic->journal_tree) {
4058 *error = "Could not allocate memory for journal tree";
4059 r = -ENOMEM;
4061 bad:
4062 kfree(crypt_data);
4063 kfree(crypt_iv);
4064 skcipher_request_free(req);
4066 return r;
4069 /*
4070 * Construct an integrity mapping
4074 * offset from the start of the device
4076 * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
4077 * number of optional arguments
4078 * optional arguments:
4080 * interleave_sectors
4087 * bitmap_flush_interval
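* internal_hash, journal_crypt, journal_mac, recalculate
*
* A hypothetical example of a table line, using crc32c tags and
* journaling (the sector count is illustrative):
* dmsetup create integ --table \
* "0 2088960 integrity /dev/sdb 0 4 J 1 internal_hash:crc32c"
*/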
4093 static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
4095 struct dm_integrity_c *ic;
4096 char dummy;
4097 int r;
4098 unsigned int extra_args;
4099 struct dm_arg_set as;
4100 static const struct dm_arg _args[] = {
4101 {0, 18, "Invalid number of feature args"},
4103 unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
4104 bool should_write_sb;
4106 unsigned long long start;
4107 __s8 log2_sectors_per_bitmap_bit = -1;
4108 __s8 log2_blocks_per_bitmap_bit;
4109 __u64 bits_in_journal;
4110 __u64 n_bitmap_bits;
4112 #define DIRECT_ARGUMENTS 4
4114 if (argc <= DIRECT_ARGUMENTS) {
4115 ti->error = "Invalid argument count";
4116 return -EINVAL;
4119 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
4120 if (!ic) {
4121 ti->error = "Cannot allocate integrity context";
4125 ti->per_io_data_size = sizeof(struct dm_integrity_io);
	ic->in_progress = RB_ROOT;
	INIT_LIST_HEAD(&ic->wait_list);
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);
	atomic64_set(&ic->number_of_mismatches, 0);
	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
		ic->mode = argv[3][0];
	} else {
		ti->error = "Invalid mode (expecting J, B, D, R)";
		r = -EINVAL;
		goto bad;
	}
	journal_sectors = 0;
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;
	while (extra_args--) {
		const char *opt_string;
		unsigned int val;
		unsigned long long llval;

		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}

		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val ? val : 1;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
			if (ic->meta_dev) {
				dm_put_device(ti, ic->meta_dev);
				ic->meta_dev = NULL;
			}
			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
					  dm_table_get_mode(ti->table), &ic->meta_dev);
			if (r) {
				ti->error = "Device lookup failed";
				goto bad;
			}
		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
			if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
				r = -EINVAL;
				ti->error = "Invalid bitmap_flush_interval argument";
				goto bad;
			}
			ic->bitmap_flush_interval = msecs_to_jiffies(val);
		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else if (!strcmp(opt_string, "recalculate")) {
			ic->recalculate_flag = true;
		} else if (!strcmp(opt_string, "reset_recalculate")) {
			ic->recalculate_flag = true;
			ic->reset_recalculate_flag = true;
		} else if (!strcmp(opt_string, "allow_discards")) {
			ic->discard = true;
		} else if (!strcmp(opt_string, "fix_padding")) {
			ic->fix_padding = true;
		} else if (!strcmp(opt_string, "fix_hmac")) {
			ic->fix_hmac = true;
		} else if (!strcmp(opt_string, "legacy_recalculate")) {
			ic->legacy_recalculate = true;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}
	ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
	if (!ic->meta_dev)
		ic->meta_device_sectors = ic->data_device_sectors;
	else
		ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);

	if (!journal_sectors) {
		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	}

	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;
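
	/*
	 * Bitmap mode and discard both rely on the target being able to
	 * compute tags on its own (e.g. the tags of discarded blocks must be
	 * regenerated from the DISCARD_FILLER pattern), so both require
	 * internal_hash.
	 */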
	if (ic->mode == 'B' && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Bitmap mode can be only used with internal hash";
		goto bad;
	}

	if (ic->discard && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Discard can be only used with internal hash";
		goto bad;
	}
	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}
	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue weren't ordered, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
					 METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->offload_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J' || ic->mode == 'B') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}
	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}
	if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (!ic->meta_dev) {
		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	} else {
		if (ic->sb->log2_interleave_sectors) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	}

	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}
	get_provided_data_sectors(ic);
	if (!ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "The device is too small";
		goto bad;
	}

try_smaller_buffer:
	r = calculate_device_limits(ic);
	if (r) {
		if (ic->meta_dev) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
		}
		ti->error = "The device is too small";
		goto bad;
	}
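
	/*
	 * In bitmap mode the dirty bitmap lives in the space reserved for the
	 * journal, so at most bits_in_journal bits are available. Keep
	 * doubling the region covered by one bit until the whole
	 * provided_data_sectors range fits.
	 */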
	if (log2_sectors_per_bitmap_bit < 0)
		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;

	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
	if (bits_in_journal > UINT_MAX)
		bits_in_journal = UINT_MAX;
	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
		log2_sectors_per_bitmap_bit++;
	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	if (should_write_sb)
		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;

	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
			 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
	if (!ic->meta_dev)
		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));

	if (ti->len > ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "Not enough provided sectors for requested mapping size";
		goto bad;
	}
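
	/*
	 * journal_watermark is a percentage: with the default of 50 and, say,
	 * 1024 journal entries, free_sectors_threshold ends up around 512,
	 * and a commit is kicked off once free journal space drops below it.
	 */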
	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;
	DEBUG_print("initialized:\n");
	DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print(" journal_entries %u\n", ic->journal_entries);
	DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
	DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
	DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
	DEBUG_print(" bits_in_journal %llu\n", bits_in_journal);
	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}
	if (ic->internal_hash) {
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
	} else {
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
			ti->error = "Recalculate can only be specified with internal_hash";
			r = -EINVAL;
			goto bad;
		}
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
	    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
	    dm_integrity_disable_recalculate(ic)) {
		ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
		r = -EOPNOTSUPP;
		goto bad;
	}
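
	/*
	 * Tag metadata is accessed through dm-bufio with a buffer size of
	 * 2^log2_buffer_sectors sectors; the sector offset set below makes
	 * bufio block 0 start right past the superblock and journal area
	 * (ic->start + ic->initial_sectors).
	 */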
	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
					   1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}
	if (ic->mode == 'B') {
		unsigned int i;
		unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);

		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->recalc_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->may_write_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
		if (!ic->bbs) {
			r = -ENOMEM;
			goto bad;
		}
		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
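
		/*
		 * Each BITMAP_BLOCK_SIZE (4K) chunk of the bitmap gets its
		 * own bitmap_block_status. The chunk's backing data lives in
		 * the journal page list: bitmap block i starts at sector i*8,
		 * which maps to page sector >> (PAGE_SHIFT - SECTOR_SHIFT)
		 * at the corresponding byte offset within that page.
		 */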
		for (i = 0; i < ic->n_bitmap_blocks; i++) {
			struct bitmap_block_status *bbs = &ic->bbs[i];
			unsigned int sector, pl_index, pl_offset;

			INIT_WORK(&bbs->work, bitmap_block_work);
			bbs->ic = ic;
			bbs->idx = i;
			bio_list_init(&bbs->bio_queue);
			spin_lock_init(&bbs->bio_queue_lock);

			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
		}
	}
	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}
	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}
	if (ic->mode == 'B') {
		unsigned int max_io_len;

		max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
		if (!max_io_len)
			max_io_len = 1U << 31;
		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
			r = dm_set_target_max_io_len(ti, max_io_len);
			if (r)
				goto bad;
		}
	}
	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	if (ic->discard)
		ti->num_discard_bios = 1;

	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
	return 0;

bad:
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
	dm_integrity_dtr(ti);
	return r;
}
static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;
	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

	if (ic->mode == 'B')
		cancel_delayed_work_sync(&ic->bitmap_flush_work);
	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->offload_wq)
		destroy_workqueue(ic->offload_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	kvfree(ic->bbs);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->recheck_pool);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	dm_integrity_free_page_list(ic->recalc_bitmap);
	dm_integrity_free_page_list(ic->may_write_bitmap);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned int i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req;

			req = ic->sk_requests[i];
			if (req) {
				kfree_sensitive(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}
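
/*
 * DM_TARGET_SINGLETON: this target must be the only one in its table.
 * DM_TARGET_INTEGRITY marks the target as handling bio integrity metadata;
 * see include/linux/device-mapper.h for the authoritative flag semantics.
 */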
static struct target_type integrity_target = {
	.name = "integrity",
	.version = {1, 11, 0},
	.module = THIS_MODULE,
	.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr = dm_integrity_ctr,
	.dtr = dm_integrity_dtr,
	.map = dm_integrity_map,
	.postsuspend = dm_integrity_postsuspend,
	.resume = dm_integrity_resume,
	.status = dm_integrity_status,
	.iterate_devices = dm_integrity_iterate_devices,
	.io_hints = dm_integrity_io_hints,
};
static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);
	if (r < 0) {
		kmem_cache_destroy(journal_io_cache);
		return r;
	}

	return 0;
}
static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}
module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");