/* drivers/md/dm-crypt.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/utils.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <keys/trusted-type.h>

#include <linux/device-mapper.h>

#include "dm-audit.h"

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	u64 cc_sector;
	atomic_t cc_pending;
	union {
		struct skcipher_request *req;
		struct aead_request *req_aead;
	} r;

};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	u8 *integrity_metadata;
	bool integrity_metadata_from_pool:1;
	bool in_tasklet:1;

	struct work_struct work;
	struct tasklet_struct tasklet;

	struct convert_context ctx;

	atomic_t io_pending;
	blk_status_t error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
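	/*
	 * For AEAD these scatterlists hold AAD (sector and IV), data and
	 * the authentication tag; plain skcipher mode uses only sg[0].
	 */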
	struct scatterlist sg_in[4];
	struct scatterlist sg_out[4];
	u64 iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

#define ELEPHANT_MAX_KEY_SIZE 32
struct iv_elephant_private {
	struct crypto_skcipher *tfm;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
	     DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
	     DM_CRYPT_WRITE_INLINE };

enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
	CRYPT_ENCRYPT_PREPROCESS,	/* Must preprocess data for encryption (elephant) */
};

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	struct percpu_counter n_allocated_pages;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	spinlock_t write_thread_lock;
	struct task_struct *write_thread;
	struct rb_root write_tree;

	char *cipher_string;
	char *cipher_auth;
	char *key_string;

	const struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
		struct iv_elephant_private elephant;
	} iv_gen_private;
	u64 iv_offset;
	unsigned int iv_size;
	unsigned short sector_size;
	unsigned char sector_shift;

	union {
		struct crypto_skcipher **tfms;
		struct crypto_aead **tfms_aead;
	} cipher_tfm;
	unsigned int tfms_count;
	unsigned long cipher_flags;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	unsigned int key_mac_size;   /* MAC key size for authenc(...) */

	unsigned int integrity_tag_size;
	unsigned int integrity_iv_size;
	unsigned int on_disk_tag_size;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	unsigned int tag_pool_max_sectors;
	mempool_t tag_pool;
	mempool_t req_pool;
	mempool_t page_pool;

	struct bio_set bs;
	struct mutex bio_alloc_lock;

	u8 *authenc_key; /* space for keys in authenc() format (if used) */
	u8 key[];
};

#define MIN_IOS		64
#define MAX_TAG_SIZE	480
#define POOL_ENTRY_SIZE	512

static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned int dm_crypt_clients_n;
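/* Per-client limit on allocated buffer pages, derived from the bounds below */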
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT			2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_VECS * 16)

static void crypt_endio(struct bio *clone);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);

static bool crypt_integrity_aead(struct crypt_config *cc);

/*
 * Use this to access cipher attributes that are independent of the key.
 */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms_aead[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * lmk: Compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system
 *      designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the sector number, the data and
 *      optionally an extra IV seed.
 *      This means that after decryption the first block
 *      of the sector must be tweaked according to the decrypted data.
 *      Loop-AES can use three encryption schemes:
 *         version 1: plain aes-cbc mode
 *         version 2: uses a 64-multikey scheme with the lmk IV generator
 *         version 3: the same as version 2 with an additional IV seed
 *                    (it uses 65 keys, the last key is used as the IV seed)
 *
 * tcw: Compatible implementation of the block chaining mode used
 *      by the TrueCrypt device encryption system (prior to version 4.1).
 *      For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the initial key and the sector number.
 *      In addition, a whitening value is applied on every sector; the whitening
 *      is calculated from the initial key and the sector number, mixed using CRC32.
 *      Note that this encryption scheme is vulnerable to watermarking attacks
 *      and should be used only to access old compatible containers.
 *
 * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
 *        The IV is the encrypted little-endian byte-offset of the sector
 *        (encrypted with the same key and cipher as the volume).
 *
 * elephant: The extended version of eboiv with an additional Elephant diffuser
 *           used with Bitlocker CBC mode.
 *           This mode was used in older Windows systems:
 *           https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	/* iv_size is at least of size u64; usually it is 16 bytes */
	*(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	/*
	 * ESSIV encryption of the IV is now handled by the crypto API,
	 * so just pass the plain sector number here.
	 */
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs;
	int log;

	if (crypt_integrity_aead(cc))
		bs = crypto_aead_blocksize(any_tfm_aead(cc));
	else
		bs = crypto_skcipher_blocksize(any_tfm(cc));
	log = ilog2(bs);

	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen.
	 */
	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

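	/*
	 * benbi is a 1-based, big-endian count of narrow cipher blocks,
	 * hence the sector-to-block shift and the +1 below.
	 */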
	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kfree_sensitive(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for LMK";
		return -EINVAL;
	}

	lmk->hash_tfm = crypto_alloc_shash("md5", 0,
					   CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_local_page(sg_page(sg));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
		kunmap_local(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_local_page(sg_page(sg));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + sg->offset, iv, cc->iv_size);

	kunmap_local(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kfree_sensitive(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kfree_sensitive(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for TCW";
		return -EINVAL;
	}

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
					    CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_local_page(sg_page(sg));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
		kunmap_local(src);
	}

	/* Calculate IV */
	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
			       cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_local_page(sg_page(sg));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
	kunmap_local(dst);

	return r;
}

static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
			       struct dm_crypt_request *dmreq)
{
	/* Used only for writes, there must be an additional space to store IV */
	get_random_bytes(iv, cc->iv_size);
	return 0;
}

static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	if (crypt_integrity_aead(cc)) {
		ti->error = "AEAD transforms not supported for EBOIV";
		return -EINVAL;
	}

	if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
		ti->error = "Block size of EBOIV cipher does not match IV size of block cipher";
		return -EINVAL;
	}

	return 0;
}

static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_skcipher *tfm = any_tfm(cc);
	struct skcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	unsigned int reqsize;
	int err;
	u8 *buf;

	reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64));

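	/* The request and a scratch IV buffer share a single allocation */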
	req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_tfm(req, tfm);

	buf = (u8 *)req + reqsize;
	memset(buf, 0, cc->iv_size);
	*(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);

	sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
	sg_init_one(&dst, iv, cc->iv_size);
	skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	kfree_sensitive(req);

	return err;
}

static void crypt_iv_elephant_dtr(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;

	crypto_free_skcipher(elephant->tfm);
	elephant->tfm = NULL;
}

static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
				 const char *opts)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	int r;

	elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
					      CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(elephant->tfm)) {
		r = PTR_ERR(elephant->tfm);
		elephant->tfm = NULL;
		return r;
	}

	r = crypt_iv_eboiv_ctr(cc, ti, NULL);
	if (r)
		crypt_iv_elephant_dtr(cc);
	return r;
}

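/* Byte-order helpers for the diffusers below; no-ops on little-endian hosts */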
static void diffuser_disk_to_cpu(u32 *d, size_t n)
{
#ifndef __LITTLE_ENDIAN
	int i;

	for (i = 0; i < n; i++)
		d[i] = le32_to_cpu((__le32)d[i]);
#endif
}

static void diffuser_cpu_to_disk(__le32 *d, size_t n)
{
#ifndef __LITTLE_ENDIAN
	int i;

	for (i = 0; i < n; i++)
		d[i] = cpu_to_le32((u32)d[i]);
#endif
}

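/*
 * Elephant diffusers A and B (see the Bitlocker paper linked above):
 * diffuser A makes five passes using rotations by 9 and 13, diffuser B
 * makes three passes using rotations by 10 and 25, treating the sector
 * as an array of 32-bit words.
 */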
static void diffuser_a_decrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 5; i++) {
		i1 = 0;
		i2 = n - 2;
		i3 = n - 5;

		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
			i1++; i2++; i3++;

			if (i3 >= n)
				i3 -= n;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			if (i2 >= n)
				i2 -= n;

			d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
			i1++; i2++; i3++;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;
		}
	}
}

static void diffuser_a_encrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 5; i++) {
		i1 = n - 1;
		i2 = n - 2 - 1;
		i3 = n - 5 - 1;

		while (i1 > 0) {
			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
			i1--; i2--; i3--;

			if (i2 < 0)
				i2 += n;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			if (i3 < 0)
				i3 += n;

			d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
			i1--; i2--; i3--;
		}
	}
}

static void diffuser_b_decrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 3; i++) {
		i1 = 0;
		i2 = 2;
		i3 = 5;

		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
			i1++; i2++; i3++;

			if (i2 >= n)
				i2 -= n;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			if (i3 >= n)
				i3 -= n;

			d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
			i1++; i2++; i3++;
		}
	}
}

static void diffuser_b_encrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 3; i++) {
		i1 = n - 1;
		i2 = 2 - 1;
		i3 = 5 - 1;

		while (i1 > 0) {
			d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
			i1--; i2--; i3--;

			if (i3 < 0)
				i3 += n;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			if (i2 < 0)
				i2 += n;

			d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
			i1--; i2--; i3--;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;
		}
	}
}

static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	u8 *es, *ks, *data, *data2, *data_offset;
	struct skcipher_request *req;
	struct scatterlist *sg, *sg2, src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int i, r;

	req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
	es = kzalloc(16, GFP_NOIO); /* Key for AES */
	ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */

	if (!req || !es || !ks) {
		r = -ENOMEM;
		goto out;
	}

	*(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);

	/* E(Ks, e(s)) */
	sg_init_one(&src, es, 16);
	sg_init_one(&dst, ks, 16);
	skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (r)
		goto out;

	/* E(Ks, e'(s)) */
	es[15] = 0x80;
	sg_init_one(&dst, &ks[16], 16);
	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (r)
		goto out;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	data = kmap_local_page(sg_page(sg));
	data_offset = data + sg->offset;

	/* Cannot modify original bio, copy to sg_out and apply Elephant to it */
	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
		data2 = kmap_local_page(sg_page(sg2));
		memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
		kunmap_local(data2);
	}

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
	}

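	/* XOR the 32-byte Elephant sector key over the whole sector */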
	for (i = 0; i < (cc->sector_size / 32); i++)
		crypto_xor(data_offset + i * 32, ks, 32);

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
	}

	kunmap_local(data);
out:
	kfree_sensitive(ks);
	kfree_sensitive(es);
	skcipher_request_free(req);
	return r;
}

static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
				 struct dm_crypt_request *dmreq)
{
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		r = crypt_iv_elephant(cc, dmreq);
		if (r)
			return r;
	}

	return crypt_iv_eboiv_gen(cc, iv, dmreq);
}

static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return crypt_iv_elephant(cc, dmreq);

	return 0;
}

static int crypt_iv_elephant_init(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	int key_offset = cc->key_size - cc->key_extra_size;

	return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
}

static int crypt_iv_elephant_wipe(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	u8 key[ELEPHANT_MAX_KEY_SIZE];

	memset(key, 0, cc->key_extra_size);
	return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
}

static const struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static const struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
	.generator = crypt_iv_plain64be_gen
};

static const struct crypt_iv_operations crypt_iv_essiv_ops = {
	.generator = crypt_iv_essiv_gen
};

static const struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static const struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static const struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static const struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static const struct crypt_iv_operations crypt_iv_random_ops = {
	.generator = crypt_iv_random_gen
};

static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
	.ctr	   = crypt_iv_eboiv_ctr,
	.generator = crypt_iv_eboiv_gen
};

static const struct crypt_iv_operations crypt_iv_elephant_ops = {
	.ctr	   = crypt_iv_elephant_ctr,
	.dtr	   = crypt_iv_elephant_dtr,
	.init	   = crypt_iv_elephant_init,
	.wipe	   = crypt_iv_elephant_wipe,
	.generator = crypt_iv_elephant_gen,
	.post	   = crypt_iv_elephant_post
};

/*
 * Integrity extensions
 */
static bool crypt_integrity_aead(struct crypt_config *cc)
{
	return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
}

static bool crypt_integrity_hmac(struct crypt_config *cc)
{
	return crypt_integrity_aead(cc) && cc->key_mac_size;
}

/* Get sg containing data */
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg)
{
	if (unlikely(crypt_integrity_aead(cc)))
		return &sg[2];

	return sg;
}

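/* Attach a bio integrity payload that points into this io's tag area */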
static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	unsigned int tag_len;
	int ret;

	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
		return 0;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);

	bip->bip_iter.bi_sector = io->cc->start + io->sector;

	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
				     tag_len, offset_in_page(io->integrity_metadata));
	if (unlikely(ret != tag_len))
		return -ENOMEM;

	return 0;
}

static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
	struct mapped_device *md = dm_table_get_md(ti->table);

	/* From now we require underlying device with our integrity profile */
	if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
		ti->error = "Integrity profile not supported.";
		return -EINVAL;
	}

	if (bi->tag_size != cc->on_disk_tag_size ||
	    bi->tuple_size != cc->on_disk_tag_size) {
		ti->error = "Integrity profile tag size mismatch.";
		return -EINVAL;
	}
	if (1 << bi->interval_exp != cc->sector_size) {
		ti->error = "Integrity profile sector size mismatch.";
		return -EINVAL;
	}

	if (crypt_integrity_aead(cc)) {
		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
		       cc->integrity_tag_size, cc->integrity_iv_size);

		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
			ti->error = "Integrity AEAD auth tag size is not supported.";
			return -EINVAL;
		}
	} else if (cc->integrity_iv_size)
		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
		       cc->integrity_iv_size);

	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
		ti->error = "Not enough space for integrity tag in the profile.";
		return -EINVAL;
	}

	return 0;
#else
	ti->error = "Integrity profile not supported.";
	return -EINVAL;
#endif
}

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	return (void *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	if (crypt_integrity_aead(cc))
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
	else
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}

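/*
 * Behind the working IV, each request also stores the original IV, the
 * original (little-endian) sector number and the tag offset; the helpers
 * below compute those addresses.
 */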
static u8 *org_iv_of_dmreq(struct crypt_config *cc,
			   struct dm_crypt_request *dmreq)
{
	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}

static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
				   struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;

	return (__le64 *) ptr;
}

static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
				      struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
		  cc->iv_size + sizeof(uint64_t);

	return (unsigned int *)ptr;
}

static void *tag_from_dmreq(struct crypt_config *cc,
			    struct dm_crypt_request *dmreq)
{
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);

	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
		cc->on_disk_tag_size];
}

static void *iv_tag_from_dmreq(struct crypt_config *cc,
			       struct dm_crypt_request *dmreq)
{
	return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
}

static int crypt_convert_block_aead(struct crypt_config *cc,
				    struct convert_context *ctx,
				    struct aead_request *req,
				    unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv, *tag;
	__le64 *sector;
	int r = 0;

	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag = tag_from_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	/* AEAD request:
	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 *  | (authenticated) | (auth+encryption) |              |
	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */
	sg_init_table(dmreq->sg_in, 4);
	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);

	sg_init_table(dmreq->sg_out, 4);
	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
	if (bio_data_dir(ctx->bio_in) == WRITE) {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size, iv);
		r = crypto_aead_encrypt(req);
		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
	} else {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size + cc->integrity_tag_size, iv);
		r = crypto_aead_decrypt(req);
	}

	if (r == -EBADMSG) {
		sector_t s = le64_to_cpu(*sector);

		DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
			    ctx->bio_in->bi_bdev, s);
		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
				 ctx->bio_in, s, 0);
	}

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}

static int crypt_convert_block_skcipher(struct crypt_config *cc,
					struct convert_context *ctx,
					struct skcipher_request *req,
					unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct scatterlist *sg_in, *sg_out;
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv;
	__le64 *sector;
	int r = 0;

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	/* For skcipher we use only the first sg item */
	sg_in = &dmreq->sg_in[0];
	sg_out = &dmreq->sg_out[0];

	sg_init_table(sg_in, 1);
	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);

	sg_init_table(sg_out, 1);
	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->integrity_iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Data can be already preprocessed in generator */
			if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
				sg_in = sg_out;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->integrity_iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}

static void kcryptd_async_done(void *async_req, int error);

static int crypt_alloc_req_skcipher(struct crypt_config *cc,
				    struct convert_context *ctx)
{
	unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->r.req) {
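		/* May be called from interrupt context, where sleeping is not allowed */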
1480 ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1481 if (!ctx->r.req)
1482 return -ENOMEM;
1483 }
c0297721 1484
ef43aa38 1485 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
54cea3f6
MB
1486
1487 /*
1488 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
1489 * requests if driver request queue is full.
1490 */
ef43aa38 1491 skcipher_request_set_callback(ctx->r.req,
432061b3 1492 CRYPTO_TFM_REQ_MAY_BACKLOG,
ef43aa38 1493 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
d68b2958
IK
1494
1495 return 0;
ddd42edf
MB
1496}
1497
d68b2958 1498static int crypt_alloc_req_aead(struct crypt_config *cc,
ef43aa38
MB
1499 struct convert_context *ctx)
1500{
004b8ae9
IK
1501 if (!ctx->r.req_aead) {
1502 ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1503 if (!ctx->r.req_aead)
d68b2958
IK
1504 return -ENOMEM;
1505 }
c0297721 1506
ef43aa38 1507 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
54cea3f6
MB
1508
1509 /*
1510 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
1511 * requests if driver request queue is full.
1512 */
ef43aa38 1513 aead_request_set_callback(ctx->r.req_aead,
432061b3 1514 CRYPTO_TFM_REQ_MAY_BACKLOG,
ef43aa38 1515 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
d68b2958
IK
1516
1517 return 0;
ef43aa38
MB
1518}
1519
d68b2958 1520static int crypt_alloc_req(struct crypt_config *cc,
ef43aa38
MB
1521 struct convert_context *ctx)
1522{
33d2f09f 1523 if (crypt_integrity_aead(cc))
d68b2958 1524 return crypt_alloc_req_aead(cc, ctx);
ef43aa38 1525 else
d68b2958 1526 return crypt_alloc_req_skcipher(cc, ctx);
ddd42edf
MB
1527}
1528
ef43aa38
MB
1529static void crypt_free_req_skcipher(struct crypt_config *cc,
1530 struct skcipher_request *req, struct bio *base_bio)
298a9fa0
MP
1531{
1532 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1533
bbdb23b5 1534 if ((struct skcipher_request *)(io + 1) != req)
6f1c819c 1535 mempool_free(req, &cc->req_pool);
298a9fa0
MP
1536}
1537
ef43aa38
MB
1538static void crypt_free_req_aead(struct crypt_config *cc,
1539 struct aead_request *req, struct bio *base_bio)
1540{
1541 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1542
1543 if ((struct aead_request *)(io + 1) != req)
6f1c819c 1544 mempool_free(req, &cc->req_pool);
ef43aa38
MB
1545}
1546
1547static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1548{
33d2f09f 1549 if (crypt_integrity_aead(cc))
ef43aa38
MB
1550 crypt_free_req_aead(cc, req, base_bio);
1551 else
1552 crypt_free_req_skcipher(cc, req, base_bio);
1553}
1554
1da177e4
LT
1555/*
1556 * Encrypt / decrypt data from one bio to another one (can be the same one)
1557 */
4e4cbee9 1558static blk_status_t crypt_convert(struct crypt_config *cc,
8abec36d 1559 struct convert_context *ctx, bool atomic, bool reset_pending)
1da177e4 1560{
ef43aa38 1561 unsigned int tag_offset = 0;
ff3af92b 1562 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
3f1e9070 1563 int r;
1da177e4 1564
8abec36d
IK
1565 /*
1566 * if reset_pending is set we are dealing with the bio for the first time,
1567 * else we're continuing to work on the previous bio, so don't mess with
1568 * the cc_pending counter
1569 */
1570 if (reset_pending)
1571 atomic_set(&ctx->cc_pending, 1);
c8081618 1572
003b5c57 1573 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
1da177e4 1574
d68b2958
IK
1575 r = crypt_alloc_req(cc, ctx);
1576 if (r) {
1577 complete(&ctx->restart);
1578 return BLK_STS_DEV_RESOURCE;
1579 }
1580
40b6229b 1581 atomic_inc(&ctx->cc_pending);
3f1e9070 1582
33d2f09f 1583 if (crypt_integrity_aead(cc))
ef43aa38
MB
1584 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1585 else
1586 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
3a7f6c99
MB
1587
1588 switch (r) {
54cea3f6
MB
1589 /*
1590 * The request was queued by a crypto driver
1591 * but the driver request queue is full, let's wait.
1592 */
3a7f6c99 1593 case -EBUSY:
8abec36d
IK
1594 if (in_interrupt()) {
1595 if (try_wait_for_completion(&ctx->restart)) {
1596 /*
1597 * we don't have to block to wait for completion,
1598 * so proceed
1599 */
1600 } else {
1601 /*
1602 * we can't wait for completion without blocking
1603 * exit and continue processing in a workqueue
1604 */
1605 ctx->r.req = NULL;
1606 ctx->cc_sector += sector_step;
1607 tag_offset++;
1608 return BLK_STS_DEV_RESOURCE;
1609 }
1610 } else {
1611 wait_for_completion(&ctx->restart);
1612 }
16735d02 1613 reinit_completion(&ctx->restart);
df561f66 1614 fallthrough;
54cea3f6
MB
1615 /*
1616 * The request is queued and processed asynchronously,
1617 * completion function kcryptd_async_done() will be called.
1618 */
c0403ec0 1619 case -EINPROGRESS:
ef43aa38 1620 ctx->r.req = NULL;
8f0009a2 1621 ctx->cc_sector += sector_step;
583fe747 1622 tag_offset++;
3f1e9070 1623 continue;
54cea3f6
MB
1624 /*
1625 * The request was already processed (synchronously).
1626 */
3a7f6c99 1627 case 0:
40b6229b 1628 atomic_dec(&ctx->cc_pending);
8f0009a2 1629 ctx->cc_sector += sector_step;
583fe747 1630 tag_offset++;
39d42fa9
IK
1631 if (!atomic)
1632 cond_resched();
3a7f6c99 1633 continue;
ef43aa38
MB
1634 /*
1635 * There was a data integrity error.
1636 */
1637 case -EBADMSG:
1638 atomic_dec(&ctx->cc_pending);
4e4cbee9 1639 return BLK_STS_PROTECTION;
ef43aa38
MB
1640 /*
1641 * There was an error while processing the request.
1642 */
3f1e9070 1643 default:
40b6229b 1644 atomic_dec(&ctx->cc_pending);
4e4cbee9 1645 return BLK_STS_IOERR;
3f1e9070 1646 }
1da177e4
LT
1647 }
1648
3f1e9070 1649 return 0;
1da177e4
LT
1650}
1651
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from it concurrently, they may deadlock in a situation where both processes
 * have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fall back
 * to blocking allocations with a mutex.
 *
 * In order to reduce allocation overhead, we try to allocate compound pages in
 * the first pass. If they are not available, we fall back to the mempool.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned int remaining_size;
	unsigned int order = MAX_ORDER - 1;

retry:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
				 GFP_NOIO, &cc->bs);
	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;

	remaining_size = size;

	while (remaining_size) {
		struct page *pages;
		unsigned size_to_add;
		unsigned remaining_order = __fls((remaining_size + PAGE_SIZE - 1) >> PAGE_SHIFT);
		order = min(order, remaining_order);

		while (order > 0) {
			pages = alloc_pages(gfp_mask
				| __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_COMP,
				order);
			if (likely(pages != NULL))
				goto have_pages;
			order--;
		}

		pages = mempool_alloc(&cc->page_pool, gfp_mask);
		if (!pages) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_DIRECT_RECLAIM;
			order = 0;
			goto retry;
		}

have_pages:
		size_to_add = min((unsigned)PAGE_SIZE << order, remaining_size);
		__bio_add_page(clone, pages, size_to_add, 0);
		remaining_size -= size_to_add;
	}

	/* Allocate space for integrity tags */
	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		clone = NULL;
	}

	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}

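/*
 * Illustrative use (this mirrors kcryptd_crypt_write_convert() below):
 *
 *	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
 *	if (unlikely(!clone))
 *		io->error = BLK_STS_IOERR;
 *
 * A NULL return means the allocation failed; the function has already
 * released the clone bio and any pages it managed to allocate.
 */
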
static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	struct folio_iter fi;

	if (clone->bi_vcnt > 0) { /* bio_for_each_folio_all crashes with an empty bio */
		bio_for_each_folio_all(fi, clone) {
			if (folio_test_large(fi.folio))
				folio_put(fi.folio);
			else
				mempool_free(&fi.folio->page, &cc->page_pool);
		}
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.r.req = NULL;
	io->integrity_metadata = NULL;
	io->integrity_metadata_from_pool = false;
	io->in_tasklet = false;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

static void kcryptd_io_bio_endio(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	bio_endio(io->base_bio);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	blk_status_t error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.r.req)
		crypt_free_req(cc, io->ctx.r.req, base_bio);

	if (unlikely(io->integrity_metadata_from_pool))
		mempool_free(io->integrity_metadata, &io->cc->tag_pool);
	else
		kfree(io->integrity_metadata);

	base_bio->bi_status = error;

	/*
	 * If we are running this function from our tasklet,
	 * we can't call bio_endio() here, because it will call
	 * clone_endio() from dm.c, which in turn will
	 * free the current struct dm_crypt_io structure with
	 * our tasklet. In this case we need to delay bio_endio()
	 * execution to after the tasklet is done and dequeued.
	 */
	if (io->in_tasklet) {
		INIT_WORK(&io->work, kcryptd_io_bio_endio);
		queue_work(cc->io_queue, &io->work);
		return;
	}

	bio_endio(base_bio);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned int rw = bio_data_dir(clone);
	blk_status_t error;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	error = clone->bi_status;
	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

#define CRYPT_MAP_READ_GFP GFP_NOWAIT

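/*
 * Read bios mapped in crypt_map() first attempt a non-blocking clone
 * allocation with CRYPT_MAP_READ_GFP (GFP_NOWAIT); on failure the bio
 * is deferred to kcryptd_io_read_work(), which retries with GFP_NOIO
 * from a workqueue where blocking is safe.
 */
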
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

	/*
	 * We need the original biovec array in order to decrypt the whole bio
	 * data *afterwards* -- thanks to immutable biovecs we don't need to
	 * worry about the block layer modifying the biovec array; so leverage
	 * bio_alloc_clone().
	 */
	clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
	if (!clone)
		return 1;
	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;

	crypt_inc_pending(io);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_dec_pending(io);
		bio_put(clone);
		return 1;
	}

	dm_submit_bio_remap(io->base_bio, clone);
	return 0;
}

static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = BLK_STS_RESOURCE;
	crypt_dec_pending(io);
}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	dm_submit_bio_remap(io->base_bio, clone);
}

#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)

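/*
 * Unless direct submission applies (see the conditions in
 * kcryptd_crypt_write_io_submit() below), encrypted writes are inserted
 * into cc->write_tree keyed by sector and drained by this thread in
 * sector order, so that out-of-order encryption completions still
 * produce mostly sequential writes to the underlying device.
 */
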
static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	struct dm_crypt_io *io;

	while (1) {
		struct rb_root write_tree;
		struct blk_plug plug;

		spin_lock_irq(&cc->write_thread_lock);
continue_locked:

		if (!RB_EMPTY_ROOT(&cc->write_tree))
			goto pop_from_list;

		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock_irq(&cc->write_thread_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		set_current_state(TASK_RUNNING);
		spin_lock_irq(&cc->write_thread_lock);
		goto continue_locked;

pop_from_list:
		write_tree = cc->write_tree;
		cc->write_tree = RB_ROOT;
		spin_unlock_irq(&cc->write_thread_lock);

		BUG_ON(rb_parent(write_tree.rb_node));

		/*
		 * Note: we cannot walk the tree here with rb_next because
		 * the structures may be freed when kcryptd_io_write is called.
		 */
		blk_start_plug(&plug);
		do {
			io = crypt_io_from_node(rb_first(&write_tree));
			rb_erase(&io->rb_node, &write_tree);
			kcryptd_io_write(io);
			cond_resched();
		} while (!RB_EMPTY_ROOT(&write_tree));
		blk_finish_plug(&plug);
	}
	return 0;
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;
	sector_t sector;
	struct rb_node **rbp, *parent;

	if (unlikely(io->error)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
	    test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
		dm_submit_bio_remap(io->base_bio, clone);
		return;
	}

	spin_lock_irqsave(&cc->write_thread_lock, flags);
	if (RB_EMPTY_ROOT(&cc->write_tree))
		wake_up_process(cc->write_thread);
	rbp = &cc->write_tree.rb_node;
	parent = NULL;
	sector = io->sector;
	while (*rbp) {
		parent = *rbp;
		if (sector < crypt_io_from_node(parent)->sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}
	rb_link_node(&io->rb_node, parent, rbp);
	rb_insert_color(&io->rb_node, &cc->write_tree);
	spin_unlock_irqrestore(&cc->write_thread_lock, flags);
}

static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
				       struct convert_context *ctx)
{
	if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
		return false;

	/*
	 * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
	 * constraints so they do not need to be issued inline by
	 * kcryptd_crypt_write_convert().
	 */
	switch (bio_op(ctx->bio_in)) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_ZEROES:
		return true;
	default:
		return false;
	}
}

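/*
 * Resume a write conversion that the Crypto API backlogged: wait for
 * the completion that kcryptd_async_done() signals with -EINPROGRESS,
 * then continue crypt_convert() from where it stopped (the final
 * argument is false so ctx->cc_pending is not reset).
 */
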
static void kcryptd_crypt_write_continue(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
	struct crypt_config *cc = io->cc;
	struct convert_context *ctx = &io->ctx;
	int crypt_finished;
	sector_t sector = io->sector;
	blk_status_t r;

	wait_for_completion(&ctx->restart);
	reinit_completion(&ctx->restart);

	r = crypt_convert(cc, &io->ctx, true, false);
	if (r)
		io->error = r;
	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
		/* Wait for completion signaled by kcryptd_async_done() */
		wait_for_completion(&ctx->restart);
		crypt_finished = 1;
	}

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct convert_context *ctx = &io->ctx;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	blk_status_t r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = BLK_STS_IOERR;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, ctx,
			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
	/*
	 * Crypto API backlogged the request, because its queue was full
	 * and we're in softirq context, so continue from a workqueue
	 * (TODO: is it actually possible to be in softirq in the write path?)
	 */
	if (r == BLK_STS_DEV_RESOURCE) {
		INIT_WORK(&io->work, kcryptd_crypt_write_continue);
		queue_work(cc->crypt_queue, &io->work);
		return;
	}
	if (r)
		io->error = r;
	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
		/* Wait for completion signaled by kcryptd_async_done() */
		wait_for_completion(&ctx->restart);
		crypt_finished = 1;
	}

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_continue(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
	struct crypt_config *cc = io->cc;
	blk_status_t r;

	wait_for_completion(&io->ctx.restart);
	reinit_completion(&io->ctx.restart);

	r = crypt_convert(cc, &io->ctx, true, false);
	if (r)
		io->error = r;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	blk_status_t r;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx,
			  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
	/*
	 * Crypto API backlogged the request, because its queue was full
	 * and we're in softirq context, so continue from a workqueue
	 */
	if (r == BLK_STS_DEV_RESOURCE) {
		INIT_WORK(&io->work, kcryptd_crypt_read_continue);
		queue_work(cc->crypt_queue, &io->work);
		return;
	}
	if (r)
		io->error = r;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(void *data, int error)
{
	struct dm_crypt_request *dmreq = data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);

	if (error == -EBADMSG) {
		sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));

		DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
			    ctx->bio_in->bi_bdev, s);
		dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
				 ctx->bio_in, s, 0);
		io->error = BLK_STS_PROTECTION;
	} else if (error < 0)
		io->error = BLK_STS_IOERR;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	/*
	 * The request is fully completed: for inline writes, let
	 * kcryptd_crypt_write_convert() do the IO submission.
	 */
	if (bio_data_dir(io->base_bio) == READ) {
		kcryptd_crypt_read_done(io);
		return;
	}

	if (kcryptd_crypt_write_inline(cc, ctx)) {
		complete(&ctx->restart);
		return;
	}

	kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_crypt_tasklet(unsigned long work)
{
	kcryptd_crypt((struct work_struct *)work);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
	    (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
		/*
		 * in_hardirq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
		 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
		 * it is being executed with irqs disabled.
		 */
		if (in_hardirq() || irqs_disabled()) {
			io->in_tasklet = true;
			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
			tasklet_schedule(&io->tasklet);
			return;
		}

		kcryptd_crypt(&io->work);
		return;
	}

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

static void crypt_free_tfms_aead(struct crypt_config *cc)
{
	if (!cc->cipher_tfm.tfms_aead)
		return;

	if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
		cc->cipher_tfm.tfms_aead[0] = NULL;
	}

	kfree(cc->cipher_tfm.tfms_aead);
	cc->cipher_tfm.tfms_aead = NULL;
}

static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
	unsigned int i;

	if (!cc->cipher_tfm.tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
			crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
			cc->cipher_tfm.tfms[i] = NULL;
		}

	kfree(cc->cipher_tfm.tfms);
	cc->cipher_tfm.tfms = NULL;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	if (crypt_integrity_aead(cc))
		crypt_free_tfms_aead(cc);
	else
		crypt_free_tfms_skcipher(cc);
}

static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
	unsigned int i;
	int err;

	cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
				      sizeof(struct crypto_skcipher *),
				      GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
						CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(cc->cipher_tfm.tfms[i])) {
			err = PTR_ERR(cc->cipher_tfm.tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	/*
	 * dm-crypt performance can vary greatly depending on which crypto
	 * algorithm implementation is used. Help people debug performance
	 * problems by logging the ->cra_driver_name.
	 */
	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
		      crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
{
	int err;

	cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
						CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
		crypt_free_tfms(cc);
		return err;
	}

	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
		      crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	if (crypt_integrity_aead(cc))
		return crypt_alloc_tfms_aead(cc, ciphermode);
	else
		return crypt_alloc_tfms_skcipher(cc, ciphermode);
}

static unsigned int crypt_subkey_size(struct crypt_config *cc)
{
	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}

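/*
 * Worked example (illustrative numbers): a multi-key mapping with
 * key_size = 96 bytes, tfms_count = 2 and key_extra_size = 32 yields
 * (96 - 32) >> ilog2(2) = 32 bytes of key material per tfm.
 */
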
static unsigned int crypt_authenckey_size(struct crypt_config *cc)
{
	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}

/*
 * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must be passed in a special format.
 * This function converts cc->key to this special format.
 */
static void crypt_copy_authenckey(char *p, const void *key,
				  unsigned int enckeylen, unsigned int authkeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;

	rta = (struct rtattr *)p;
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	p += RTA_SPACE(sizeof(*param));
	memcpy(p, key + enckeylen, authkeylen);
	p += authkeylen;
	memcpy(p, key, enckeylen);
}

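/*
 * Resulting cc->authenc_key layout after the copies above:
 *
 *	| rtattr | authenc_key_param (enckeylen, BE32) |
 *	| authentication key (authkeylen bytes)        |
 *	| encryption key (enckeylen bytes)             |
 *
 * i.e. the HMAC key, stored after the cipher key in cc->key, is moved
 * in front of it, which is the layout the authenc() template expects.
 */
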
static int crypt_setkey(struct crypt_config *cc)
{
	unsigned int subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = crypt_subkey_size(cc);

	if (crypt_integrity_hmac(cc)) {
		if (subkey_size < cc->key_mac_size)
			return -EINVAL;

		crypt_copy_authenckey(cc->authenc_key, cc->key,
				      subkey_size - cc->key_mac_size,
				      cc->key_mac_size);
	}

	for (i = 0; i < cc->tfms_count; i++) {
		if (crypt_integrity_hmac(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
				cc->authenc_key, crypt_authenckey_size(cc));
		else if (crypt_integrity_aead(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
					       cc->key + (i * subkey_size),
					       subkey_size);
		else
			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
						   cc->key + (i * subkey_size),
						   subkey_size);
		if (r)
			err = r;
	}

	if (crypt_integrity_hmac(cc))
		memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));

	return err;
}

#ifdef CONFIG_KEYS

static bool contains_whitespace(const char *str)
{
	while (*str)
		if (isspace(*str++))
			return true;
	return false;
}

static int set_key_user(struct crypt_config *cc, struct key *key)
{
	const struct user_key_payload *ukp;

	ukp = user_key_payload_locked(key);
	if (!ukp)
		return -EKEYREVOKED;

	if (cc->key_size != ukp->datalen)
		return -EINVAL;

	memcpy(cc->key, ukp->data, cc->key_size);

	return 0;
}

static int set_key_encrypted(struct crypt_config *cc, struct key *key)
{
	const struct encrypted_key_payload *ekp;

	ekp = key->payload.data[0];
	if (!ekp)
		return -EKEYREVOKED;

	if (cc->key_size != ekp->decrypted_datalen)
		return -EINVAL;

	memcpy(cc->key, ekp->decrypted_data, cc->key_size);

	return 0;
}

static int set_key_trusted(struct crypt_config *cc, struct key *key)
{
	const struct trusted_key_payload *tkp;

	tkp = key->payload.data[0];
	if (!tkp)
		return -EKEYREVOKED;

	if (cc->key_size != tkp->key_len)
		return -EINVAL;

	memcpy(cc->key, tkp->key, cc->key_size);

	return 0;
}

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	char *new_key_string, *key_desc;
	int ret;
	struct key_type *type;
	struct key *key;
	int (*set_key)(struct crypt_config *cc, struct key *key);

	/*
	 * Reject key_string with whitespace. dm core currently lacks code for
	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
	 */
	if (contains_whitespace(key_string)) {
		DMERR("whitespace chars not allowed in key string");
		return -EINVAL;
	}

	/* look for next ':' separating key_type from key_description */
	key_desc = strchr(key_string, ':');
	if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
		return -EINVAL;

	if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
		type = &key_type_logon;
		set_key = set_key_user;
	} else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
		type = &key_type_user;
		set_key = set_key_user;
	} else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) &&
		   !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
		type = &key_type_encrypted;
		set_key = set_key_encrypted;
	} else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
		   !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
		type = &key_type_trusted;
		set_key = set_key_trusted;
	} else {
		return -EINVAL;
	}

	new_key_string = kstrdup(key_string, GFP_KERNEL);
	if (!new_key_string)
		return -ENOMEM;

	key = request_key(type, key_desc + 1, NULL);
	if (IS_ERR(key)) {
		kfree_sensitive(new_key_string);
		return PTR_ERR(key);
	}

	down_read(&key->sem);

	ret = set_key(cc, key);
	if (ret < 0) {
		up_read(&key->sem);
		key_put(key);
		kfree_sensitive(new_key_string);
		return ret;
	}

	up_read(&key->sem);
	key_put(key);

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	ret = crypt_setkey(cc);

	if (!ret) {
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
		kfree_sensitive(cc->key_string);
		cc->key_string = new_key_string;
	} else
		kfree_sensitive(new_key_string);

	return ret;
}

static int get_key_size(char **key_string)
{
	char *colon, dummy;
	int ret;

	if (*key_string[0] != ':')
		return strlen(*key_string) >> 1;

	/* look for next ':' in key string */
	colon = strpbrk(*key_string + 1, ":");
	if (!colon)
		return -EINVAL;

	if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
		return -EINVAL;

	*key_string = colon;

	/* remaining key string should be :<logon|user>:<key_desc> */

	return ret;
}

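/*
 * Illustrative keyring key strings (key descriptions are hypothetical):
 *
 *	:32:logon:my-prefix:my-volume-key
 *	:64:trusted:my-tpm-sealed-key
 *
 * get_key_size() consumes the leading ":<size>" and leaves *key_string
 * at ":<type>:<desc>" for crypt_set_key()/crypt_set_keyring_key().
 */
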
#else

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	return -EINVAL;
}

static int get_key_size(char **key_string)
{
	return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
}

#endif /* CONFIG_KEYS */

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
	if (key[0] == ':') {
		r = crypt_set_keyring_key(cc, key + 1);
		goto out;
	}

	/* clear the flag since following operations may invalidate previously valid key */
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	/* wipe references to any kernel keyring key */
	kfree_sensitive(cc->key_string);
	cc->key_string = NULL;

	/* Decode key from its hex representation. */
	if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
		goto out;

	r = crypt_setkey(cc);
	if (!r)
		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

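/*
 * Wiping loads freshly generated random bytes as the key and pushes
 * them into the tfms via crypt_setkey() before zeroing cc->key --
 * presumably so that copies of the old key held inside the crypto
 * layer or a hardware driver are overwritten too, not just our buffer.
 */
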
static int crypt_wipe_key(struct crypt_config *cc)
{
	int r;

	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	get_random_bytes(&cc->key, cc->key_size);

	/* Wipe IV private keys */
	if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
		r = cc->iv_gen_ops->wipe(cc);
		if (r)
			return r;
	}

	kfree_sensitive(cc->key_string);
	cc->key_string = NULL;
	r = crypt_setkey(cc);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return r;
}

static void crypt_calculate_pages_per_client(void)
{
	unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;

	if (!dm_crypt_clients_n)
		return;

	pages /= dm_crypt_clients_n;
	if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
		pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
	dm_crypt_pages_per_client = pages;
}

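/*
 * Worked example (illustrative figures, assuming DM_CRYPT_MEMORY_PERCENT
 * is 2): with 4 GiB of non-highmem memory the budget is about 20971
 * 4 KiB pages; with four dm-crypt clients each may cache about 5242
 * pages, but never less than DM_CRYPT_MIN_PAGES_PER_CLIENT.
 */
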
static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct crypt_config *cc = pool_data;
	struct page *page;

	/*
	 * Note, percpu_counter_read_positive() may over (and under) estimate
	 * the current usage by at most (batch - 1) * num_online_cpus() pages,
	 * but avoids potential spinlock contention of an exact result.
	 */
	if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
	    likely(gfp_mask & __GFP_NORETRY))
		return NULL;

	page = alloc_page(gfp_mask);
	if (likely(page != NULL))
		percpu_counter_add(&cc->n_allocated_pages, 1);

	return page;
}

static void crypt_page_free(void *page, void *pool_data)
{
	struct crypt_config *cc = pool_data;

	__free_page(page);
	percpu_counter_sub(&cc->n_allocated_pages, 1);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	bioset_exit(&cc->bs);

	mempool_exit(&cc->page_pool);
	mempool_exit(&cc->req_pool);
	mempool_exit(&cc->tag_pool);

	WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
	percpu_counter_destroy(&cc->n_allocated_pages);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kfree_sensitive(cc->cipher_string);
	kfree_sensitive(cc->key_string);
	kfree_sensitive(cc->cipher_auth);
	kfree_sensitive(cc->authenc_key);

	mutex_destroy(&cc->bio_alloc_lock);

	/* Must zero key material before freeing */
	kfree_sensitive(cc);

	spin_lock(&dm_crypt_clients_lock);
	WARN_ON(!dm_crypt_clients_n);
	dm_crypt_clients_n--;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);

	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}

static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
{
	struct crypt_config *cc = ti->private;

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "plain64be") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64be_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "eboiv") == 0)
		cc->iv_gen_ops = &crypt_iv_eboiv_ops;
	else if (strcmp(ivmode, "elephant") == 0) {
		cc->iv_gen_ops = &crypt_iv_elephant_ops;
		cc->key_parts = 2;
		cc->key_extra_size = cc->key_size / 2;
		if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
			return -EINVAL;
		set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
	} else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 are recognised according to the length
		 * of the provided multi-key string.
		 * If present (version 3), the last key is used as the IV seed.
		 * All keys (including the IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else if (strcmp(ivmode, "random") == 0) {
		cc->iv_gen_ops = &crypt_iv_random_ops;
		/* Need storage space in integrity fields. */
		cc->integrity_iv_size = cc->iv_size;
	} else {
		ti->error = "Invalid IV mode";
		return -EINVAL;
	}

	return 0;
}

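/*
 * Illustrative legacy cipher specs and the IV modes they select above
 * (examples only, not an exhaustive list):
 *
 *	aes-xts-plain64		64-bit little-endian sector number as IV
 *	aes-cbc-essiv:sha256	ESSIV, sector encrypted with sha256-hashed key
 *	aes:64-cbc-lmk		64 keys, loop-AES compatible IV
 */
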
/*
 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
 * The HMAC is needed to calculate tag size (HMAC digest size).
 * This should probably be done by crypto API calls (once available...)
 */
static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
{
	char *start, *end, *mac_alg = NULL;
	struct crypto_ahash *mac;

	if (!strstarts(cipher_api, "authenc("))
		return 0;

	start = strchr(cipher_api, '(');
	end = strchr(cipher_api, ',');
	if (!start || !end || ++start > end)
		return -EINVAL;

	mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
	if (!mac_alg)
		return -ENOMEM;
	strncpy(mac_alg, start, end - start);

	mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
	kfree(mac_alg);

	if (IS_ERR(mac))
		return PTR_ERR(mac);

	cc->key_mac_size = crypto_ahash_digestsize(mac);
	crypto_free_ahash(mac);

	cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
	if (!cc->authenc_key)
		return -ENOMEM;

	return 0;
}

static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
	int ret = -EINVAL;

	cc->tfms_count = 1;

	/*
	 * New format (capi: prefix)
	 * capi:cipher_api_spec-iv:ivopts
	 */
	tmp = &cipher_in[strlen("capi:")];

	/* Separate IV options if present, it can contain another '-' in hash name */
	*ivopts = strrchr(tmp, ':');
	if (*ivopts) {
		**ivopts = '\0';
		(*ivopts)++;
	}
	/* Parse IV mode */
	*ivmode = strrchr(tmp, '-');
	if (*ivmode) {
		**ivmode = '\0';
		(*ivmode)++;
	}
	/* The rest is crypto API spec */
	cipher_api = tmp;

	/* Alloc AEAD, can be used only in new format. */
	if (crypt_integrity_aead(cc)) {
		ret = crypt_ctr_auth_cipher(cc, cipher_api);
		if (ret < 0) {
			ti->error = "Invalid AEAD cipher spec";
			return ret;
		}
	}

	if (*ivmode && !strcmp(*ivmode, "lmk"))
		cc->tfms_count = 64;

	if (*ivmode && !strcmp(*ivmode, "essiv")) {
		if (!*ivopts) {
			ti->error = "Digest algorithm missing for ESSIV mode";
			return -EINVAL;
		}
		ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
			       cipher_api, *ivopts);
		if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
			ti->error = "Cannot allocate cipher string";
			return -ENOMEM;
		}
		cipher_api = buf;
	}

	cc->key_parts = cc->tfms_count;

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		return ret;
	}

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	return 0;
}

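/*
 * Illustrative "capi:" specs parsed above (examples only):
 *
 *	capi:xts(aes)-plain64
 *	capi:authenc(hmac(sha256),cbc(aes))-essiv:sha256
 *
 * Everything before the trailing "-iv:ivopts" is handed to the crypto
 * API verbatim as the cipher specification.
 */
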
static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	chainmode = strsep(&tmp, "-");
	*ivmode = strsep(&tmp, ":");
	*ivopts = tmp;

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
		chainmode = "cbc";
		*ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !*ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	if (*ivmode && !strcmp(*ivmode, "essiv")) {
		if (!*ivopts) {
			ti->error = "Digest algorithm missing for ESSIV mode";
			kfree(cipher_api);
			return -EINVAL;
		}
		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
			       "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
	} else {
		ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
			       "%s(%s)", chainmode, cipher);
	}
	if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		kfree(cipher_api);
		return ret;
	}
	kfree(cipher_api);

	return 0;
bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *ivmode = NULL, *ivopts = NULL;
	int ret;

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string) {
		ti->error = "Cannot allocate cipher strings";
		return -ENOMEM;
	}

	if (strstarts(cipher_in, "capi:"))
		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
	else
		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
	if (ret)
		return ret;

	/* Initialize IV */
	ret = crypt_ctr_ivmode(ti, ivmode);
	if (ret < 0)
		return ret;

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		return ret;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			return ret;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			return ret;
		}
	}

	/* wipe the kernel key payload copy */
	if (cc->key_string)
		memset(cc->key, 0, cc->key_size * sizeof(u8));

	return ret;
}

static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 8, "Invalid number of feature args"},
	};
	unsigned int opt_params, val;
	const char *opt_string, *sval;
	char dummy;
	int ret;

	/* Optional parameters */
	as.argc = argc;
	as.argv = argv;

	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (ret)
		return ret;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (!strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;

		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		else if (!strcasecmp(opt_string, "no_read_workqueue"))
			set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
		else if (!strcasecmp(opt_string, "no_write_workqueue"))
			set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
			if (val == 0 || val > MAX_TAG_SIZE) {
				ti->error = "Invalid integrity arguments";
				return -EINVAL;
			}
			cc->on_disk_tag_size = val;
			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
			if (!strcasecmp(sval, "aead")) {
				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
			} else if (strcasecmp(sval, "none")) {
				ti->error = "Unknown integrity profile";
				return -EINVAL;
			}

			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
			if (!cc->cipher_auth)
				return -ENOMEM;
		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
			    cc->sector_size > 4096 ||
			    (cc->sector_size & (cc->sector_size - 1))) {
				ti->error = "Invalid feature value for sector_size";
				return -EINVAL;
			}
			if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
				ti->error = "Device size is not multiple of sector_size feature";
				return -EINVAL;
			}
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}

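/*
 * Illustrative optional-argument tail of a table line (the count of
 * feature args comes first):
 *
 *	... 3 allow_discards sector_size:4096 iv_large_sectors
 */
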
#ifdef CONFIG_BLK_DEV_ZONED
static int crypt_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct crypt_config *cc = ti->private;

	return dm_report_zones(cc->dev->bdev, cc->start,
			cc->start + dm_target_offset(ti, args->next_sector),
			args, nr_zones);
}
#else
#define crypt_report_zones NULL
#endif

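/*
 * Illustrative table line for the constructor below (key and device
 * are placeholders):
 *
 *	0 2097152 crypt aes-xts-plain64 <64-hex-digit-key> 0 /dev/sda1 0
 *
 * maps 1 GiB (2097152 sectors) with aes-xts-plain64, iv_offset 0,
 * starting at sector 0 of /dev/sda1.
 */
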
/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	const char *devname = dm_table_device_name(ti->table);
	int key_size;
	unsigned int align_mask;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding, additional_req_size;
	char dummy;

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = get_key_size(&argv[1]);
	if (key_size < 0) {
		ti->error = "Cannot parse key size";
		return -EINVAL;
	}

	cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);
	cc->sector_shift = 0;

	ti->private = cc;

	spin_lock(&dm_crypt_clients_lock);
	dm_crypt_clients_n++;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);

	ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
	if (ret < 0)
		goto bad;

	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		if (ret)
			goto bad;
	}

	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	if (crypt_integrity_aead(cc)) {
		cc->dmreq_start = sizeof(struct aead_request);
		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
	} else {
		cc->dmreq_start = sizeof(struct skcipher_request);
		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
	}
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& align_mask;
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = align_mask;
	}

	/* ...| IV + padding | original IV | original sec. number | bio tag offset | */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

	ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
	if (ret) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_DMA_MINALIGN);

	ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
	if (ret) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
	if (ret) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	if (bdev_is_zoned(cc->dev->bdev)) {
		/*
		 * For zoned block devices, we need to preserve the issuer write
		 * ordering. To do so, disable write workqueues and force inline
		 * encryption completion.
		 */
		set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);

		/*
		 * All zone append writes to a zone of a zoned block device will
		 * have the same BIO sector, the start of the zone. When the
		 * cipher IV mode uses sector values, all data targeting a
		 * zone will be encrypted using the first sector numbers of the
		 * zone. This will not result in write errors but will
		 * cause most reads to fail as reads will use the sector values
		 * for the actual data locations, resulting in IV mismatch.
		 * To avoid this problem, ask DM core to emulate zone append
		 * operations with regular writes.
		 */
		DMDEBUG("Zone append operations will be emulated");
		ti->emulate_zone_append = true;
	}

	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
		ret = crypt_integrity_ctr(cc, ti);
		if (ret)
			goto bad;

		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

		ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
		if (ret) {
			ti->error = "Cannot allocate integrity tags mempool";
			goto bad;
		}

		cc->tag_pool_max_sectors <<= cc->sector_shift;
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
						  1, devname);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd/%s",
						  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus(), devname);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	spin_lock_init(&cc->write_thread_lock);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_run(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->limit_swap_bios = true;
	ti->accounts_remapped_io = true;

	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
	return 0;

bad:
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
	crypt_dtr(ti);
	return ret;
}

7de3ee57 3408static int crypt_map(struct dm_target *ti, struct bio *bio)
1da177e4 3409{
028867ac 3410 struct dm_crypt_io *io;
49a8a920 3411 struct crypt_config *cc = ti->private;
647c7db1 3412
772ae5f5 3413 /*
28a8f0d3
MC
3414 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
3415 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
e6047149 3416 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
772ae5f5 3417 */
1eff9d32 3418 if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
28a8f0d3 3419 bio_op(bio) == REQ_OP_DISCARD)) {
74d46992 3420 bio_set_dev(bio, cc->dev->bdev);
772ae5f5 3421 if (bio_sectors(bio))
4f024f37
KO
3422 bio->bi_iter.bi_sector = cc->start +
3423 dm_target_offset(ti, bio->bi_iter.bi_sector);
647c7db1
MP
3424 return DM_MAPIO_REMAPPED;
3425 }
1da177e4 3426
	/*
	 * Check if bio is too large, split as needed.
	 */
	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
		dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return DM_MAPIO_KILL;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

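	/*
	 * One tag per encryption granule travels with the bio.  Try a plain
	 * kmalloc() first; if that fails (or tag_len would exceed
	 * KMALLOC_MAX_SIZE), shrink the bio to tag_pool_max_sectors and fall
	 * back to a preallocated entry from the tag mempool.
	 */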
	if (cc->on_disk_tag_size) {
		unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);

		if (unlikely(tag_len > KMALLOC_MAX_SIZE))
			io->integrity_metadata = NULL;
		else
			io->integrity_metadata = kmalloc(tag_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

		if (unlikely(!io->integrity_metadata)) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
			io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
		}
	}

	if (crypt_integrity_aead(cc))
		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
	else
		io->ctx.r.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, CRYPT_MAP_READ_GFP))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

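/*
 * Branchless nibble-to-ASCII-hex conversion: for c in 0..9 the masked term
 * is 0 and the result is '0' + c; for c in 10..15 the subtraction (9 - c)
 * wraps, the shift-and-mask yields 0x27, and '0' + c + 0x27 == 'a' + (c - 10).
 * Avoiding a data-dependent branch keeps key printing constant-time.
 */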
static char hex2asc(unsigned char c)
{
	return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
}

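/*
 * For STATUSTYPE_TABLE the output mirrors the constructor arguments, e.g.
 * (illustrative values only):
 *
 *   aes-xts-plain64 <hex key> 0 /dev/sdb 0 2 allow_discards sector_size:4096
 */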
static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if (cc->key_string)
				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
			else {
				for (i = 0; i < cc->key_size; i++) {
					DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
					       hex2asc(cc->key[i] & 0xf));
				}
			}
		} else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		if (cc->on_disk_tag_size)
			num_feature_args++;
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
				DMEMIT(" no_read_workqueue");
			if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
				DMEMIT(" no_write_workqueue");
			if (cc->on_disk_tag_size)
				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
		}
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",allow_discards=%c", ti->num_discard_bios ? 'y' : 'n');
		DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
		DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",no_write_workqueue=%c", test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags) ?
		       'y' : 'n');
		DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
		       'y' : 'n');

		if (cc->on_disk_tag_size)
			DMEMIT(",integrity_tag_size=%u,cipher_auth=%s",
			       cc->on_disk_tag_size, cc->cipher_auth);
		if (cc->sector_size != (1 << SECTOR_SHIFT))
			DMEMIT(",sector_size=%d", cc->sector_size);
		if (cc->cipher_string)
			DMEMIT(",cipher_string=%s", cc->cipher_string);

		DMEMIT(",key_size=%u", cc->key_size);
		DMEMIT(",key_parts=%u", cc->key_parts);
		DMEMIT(",key_extra_size=%u", cc->key_extra_size);
		DMEMIT(",key_mac_size=%u", cc->key_mac_size);
		DMEMIT(";");
		break;
	}
}

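/*
 * Suspend/resume hooks: DM_CRYPT_SUSPENDED gates the key-change messages
 * below, and preresume refuses to run without a valid key, so a device whose
 * key was wiped is never resumed by accident.
 */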
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
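/*
 * Illustrative dmsetup usage ("cryptdev" stands for the mapped device name;
 * the device must be suspended first):
 *
 *	dmsetup suspend cryptdev
 *	dmsetup message cryptdev 0 key set <hex key>
 *	dmsetup resume cryptdev
 */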
static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	int key_size, ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
			key_size = get_key_size(&argv[2]);
			if (key_size < 0 || cc->key_size != key_size) {
				memset(argv[2], '0', strlen(argv[2]));
				return -EINVAL;
			}

			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			/* wipe the kernel key payload copy */
			if (cc->key_string)
				memset(cc->key, 0, cc->key_size * sizeof(u8));
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe"))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;

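	/*
	 * With the sector_size feature the encryption granule can be larger
	 * than 512 bytes; advertise it as the logical/physical block size so
	 * the device is only ever accessed in aligned, granule-sized units.
	 */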
	limits->logical_block_size =
		max_t(unsigned int, limits->logical_block_size, cc->sector_size);
	limits->physical_block_size =
		max_t(unsigned int, limits->physical_block_size, cc->sector_size);
	limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
	limits->dma_alignment = limits->logical_block_size - 1;
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 24, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.features = DM_TARGET_ZONED_HM,
	.report_zones = crypt_report_zones,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};
module_dm(crypt);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");