Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
bf14299f | 2 | * Copyright (C) 2003 Jana Saout <jana@saout.de> |
1da177e4 | 3 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> |
bbb16584 MB |
4 | * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved. |
5 | * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com> | |
1da177e4 LT |
6 | * |
7 | * This file is released under the GPL. | |
8 | */ | |
9 | ||
43d69034 | 10 | #include <linux/completion.h> |
d1806f6a | 11 | #include <linux/err.h> |
1da177e4 LT |
12 | #include <linux/module.h> |
13 | #include <linux/init.h> | |
14 | #include <linux/kernel.h> | |
c538f6ec | 15 | #include <linux/key.h> |
1da177e4 LT |
16 | #include <linux/bio.h> |
17 | #include <linux/blkdev.h> | |
18 | #include <linux/mempool.h> | |
19 | #include <linux/slab.h> | |
20 | #include <linux/crypto.h> | |
21 | #include <linux/workqueue.h> | |
dc267621 | 22 | #include <linux/kthread.h> |
3fcfab16 | 23 | #include <linux/backing-dev.h> |
60063497 | 24 | #include <linux/atomic.h> |
378f058c | 25 | #include <linux/scatterlist.h> |
b3c5fd30 | 26 | #include <linux/rbtree.h> |
027c431c | 27 | #include <linux/ctype.h> |
1da177e4 | 28 | #include <asm/page.h> |
48527fa7 | 29 | #include <asm/unaligned.h> |
34745785 MB |
30 | #include <crypto/hash.h> |
31 | #include <crypto/md5.h> | |
32 | #include <crypto/algapi.h> | |
bbdb23b5 | 33 | #include <crypto/skcipher.h> |
ef43aa38 MB |
34 | #include <crypto/aead.h> |
35 | #include <crypto/authenc.h> | |
36 | #include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */ | |
27f5411a | 37 | #include <linux/key-type.h> |
c538f6ec | 38 | #include <keys/user-type.h> |
27f5411a | 39 | #include <keys/encrypted-type.h> |
363880c4 | 40 | #include <keys/trusted-type.h> |
1da177e4 | 41 | |
586e80e6 | 42 | #include <linux/device-mapper.h> |
1da177e4 | 43 | |
72d94861 | 44 | #define DM_MSG_PREFIX "crypt" |
1da177e4 | 45 | |
1da177e4 LT |
46 | /* |
47 | * context holding the current state of a multi-part conversion | |
48 | */ | |
49 | struct convert_context { | |
43d69034 | 50 | struct completion restart; |
1da177e4 LT |
51 | struct bio *bio_in; |
52 | struct bio *bio_out; | |
003b5c57 KO |
53 | struct bvec_iter iter_in; |
54 | struct bvec_iter iter_out; | |
8d683dcd | 55 | u64 cc_sector; |
40b6229b | 56 | atomic_t cc_pending; |
ef43aa38 MB |
57 | union { |
58 | struct skcipher_request *req; | |
59 | struct aead_request *req_aead; | |
60 | } r; | |
61 | ||
1da177e4 LT |
62 | }; |
63 | ||
53017030 MB |
64 | /* |
65 | * per bio private data | |
66 | */ | |
67 | struct dm_crypt_io { | |
49a8a920 | 68 | struct crypt_config *cc; |
53017030 | 69 | struct bio *base_bio; |
ef43aa38 MB |
70 | u8 *integrity_metadata; |
71 | bool integrity_metadata_from_pool; | |
53017030 | 72 | struct work_struct work; |
39d42fa9 | 73 | struct tasklet_struct tasklet; |
53017030 MB |
74 | |
75 | struct convert_context ctx; | |
76 | ||
40b6229b | 77 | atomic_t io_pending; |
4e4cbee9 | 78 | blk_status_t error; |
0c395b0f | 79 | sector_t sector; |
dc267621 | 80 | |
b3c5fd30 | 81 | struct rb_node rb_node; |
298a9fa0 | 82 | } CRYPTO_MINALIGN_ATTR; |
53017030 | 83 | |
01482b76 | 84 | struct dm_crypt_request { |
b2174eeb | 85 | struct convert_context *ctx; |
ef43aa38 MB |
86 | struct scatterlist sg_in[4]; |
87 | struct scatterlist sg_out[4]; | |
8d683dcd | 88 | u64 iv_sector; |
01482b76 MB |
89 | }; |
90 | ||
1da177e4 LT |
91 | struct crypt_config; |
92 | ||
93 | struct crypt_iv_operations { | |
94 | int (*ctr)(struct crypt_config *cc, struct dm_target *ti, | |
d469f841 | 95 | const char *opts); |
1da177e4 | 96 | void (*dtr)(struct crypt_config *cc); |
b95bf2d3 | 97 | int (*init)(struct crypt_config *cc); |
542da317 | 98 | int (*wipe)(struct crypt_config *cc); |
2dc5327d MB |
99 | int (*generator)(struct crypt_config *cc, u8 *iv, |
100 | struct dm_crypt_request *dmreq); | |
101 | int (*post)(struct crypt_config *cc, u8 *iv, | |
102 | struct dm_crypt_request *dmreq); | |
1da177e4 LT |
103 | }; |
104 | ||
60473592 MB |
105 | struct iv_benbi_private { |
106 | int shift; | |
107 | }; | |
108 | ||
34745785 MB |
109 | #define LMK_SEED_SIZE 64 /* hash + 0 */ |
110 | struct iv_lmk_private { | |
111 | struct crypto_shash *hash_tfm; | |
112 | u8 *seed; | |
113 | }; | |
114 | ||
ed04d981 MB |
115 | #define TCW_WHITENING_SIZE 16 |
116 | struct iv_tcw_private { | |
117 | struct crypto_shash *crc32_tfm; | |
118 | u8 *iv_seed; | |
119 | u8 *whitening; | |
120 | }; | |
121 | ||
bbb16584 MB |
122 | #define ELEPHANT_MAX_KEY_SIZE 32 |
123 | struct iv_elephant_private { | |
124 | struct crypto_skcipher *tfm; | |
125 | }; | |
126 | ||
1da177e4 LT |
127 | /* |
128 | * Crypt: maps a linear range of a block device | |
129 | * and encrypts / decrypts at the same time. | |
130 | */ | |
0f5d8e6e | 131 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, |
39d42fa9 | 132 | DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, |
8e225f04 DLM |
133 | DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE, |
134 | DM_CRYPT_WRITE_INLINE }; | |
c0297721 | 135 | |
ef43aa38 | 136 | enum cipher_flags { |
74d1da39 | 137 | CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */ |
8f0009a2 | 138 | CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */ |
bbb16584 | 139 | CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */ |
ef43aa38 MB |
140 | }; |
141 | ||
c0297721 | 142 | /* |
610f2de3 | 143 | * The fields in here must be read-only after initialization. |
c0297721 | 144 | */ |
1da177e4 LT |
145 | struct crypt_config { |
146 | struct dm_dev *dev; | |
147 | sector_t start; | |
148 | ||
5059353d MP |
149 | struct percpu_counter n_allocated_pages; |
150 | ||
cabf08e4 MB |
151 | struct workqueue_struct *io_queue; |
152 | struct workqueue_struct *crypt_queue; | |
3f1e9070 | 153 | |
c7329eff | 154 | spinlock_t write_thread_lock; |
72d711c8 | 155 | struct task_struct *write_thread; |
b3c5fd30 | 156 | struct rb_root write_tree; |
dc267621 | 157 | |
7dbcd137 | 158 | char *cipher_string; |
ef43aa38 | 159 | char *cipher_auth; |
c538f6ec | 160 | char *key_string; |
5ebaee6d | 161 | |
1b1b58f5 | 162 | const struct crypt_iv_operations *iv_gen_ops; |
79066ad3 | 163 | union { |
60473592 | 164 | struct iv_benbi_private benbi; |
34745785 | 165 | struct iv_lmk_private lmk; |
ed04d981 | 166 | struct iv_tcw_private tcw; |
bbb16584 | 167 | struct iv_elephant_private elephant; |
79066ad3 | 168 | } iv_gen_private; |
8d683dcd | 169 | u64 iv_offset; |
1da177e4 | 170 | unsigned int iv_size; |
ff3af92b MP |
171 | unsigned short int sector_size; |
172 | unsigned char sector_shift; | |
1da177e4 | 173 | |
ef43aa38 MB |
174 | union { |
175 | struct crypto_skcipher **tfms; | |
176 | struct crypto_aead **tfms_aead; | |
177 | } cipher_tfm; | |
d1f96423 | 178 | unsigned tfms_count; |
ef43aa38 | 179 | unsigned long cipher_flags; |
c0297721 | 180 | |
ddd42edf MB |
181 | /* |
182 | * Layout of each crypto request: | |
183 | * | |
bbdb23b5 | 184 | * struct skcipher_request |
ddd42edf MB |
185 | * context |
186 | * padding | |
187 | * struct dm_crypt_request | |
188 | * padding | |
189 | * IV | |
190 | * | |
191 | * The padding is added so that dm_crypt_request and the IV are | |
192 | * correctly aligned. | |
193 | */ | |
194 | unsigned int dmreq_start; | |
ddd42edf | 195 | |
298a9fa0 MP |
196 | unsigned int per_bio_data_size; |
197 | ||
e48d4bbf | 198 | unsigned long flags; |
1da177e4 | 199 | unsigned int key_size; |
da31a078 MB |
200 | unsigned int key_parts; /* independent parts in key buffer */ |
201 | unsigned int key_extra_size; /* additional keys length */ | |
ef43aa38 MB |
202 | unsigned int key_mac_size; /* MAC key size for authenc(...) */ |
203 | ||
204 | unsigned int integrity_tag_size; | |
205 | unsigned int integrity_iv_size; | |
206 | unsigned int on_disk_tag_size; | |
207 | ||
72d711c8 MS |
208 | /* |
209 | * pool for per bio private data, crypto requests, | |
210 | * encryption requests/buffer pages and integrity tags |
211 | */ | |
212 | unsigned tag_pool_max_sectors; | |
213 | mempool_t tag_pool; | |
214 | mempool_t req_pool; | |
215 | mempool_t page_pool; | |
216 | ||
217 | struct bio_set bs; | |
218 | struct mutex bio_alloc_lock; | |
219 | ||
ef43aa38 | 220 | u8 *authenc_key; /* space for keys in authenc() format (if used) */ |
b18ae8dd | 221 | u8 key[]; |
1da177e4 LT |
222 | }; |
223 | ||
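The per-request layout described in the comment inside the struct above is plain pointer arithmetic. Below is a minimal userspace sketch of how a `dmreq_start`-style offset and the IV offset come about; the request size and alignment values are made-up stand-ins for what `crypto_skcipher_reqsize()` and the cipher's alignment mask would report at runtime.

```c
#include <stdio.h>
#include <stddef.h>

/* Round x up to a multiple of a (a power of two); mirrors the kernel's ALIGN(). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct dm_crypt_request_sketch {      /* illustrative stand-in, not the kernel struct */
	void *ctx;
	char sg_in[64], sg_out[64];
	unsigned long long iv_sector;
};

int main(void)
{
	size_t req_size = 160;     /* assumed crypto request + context size */
	size_t cipher_align = 16;  /* assumed cipher alignmask + 1 */
	size_t iv_size = 16;

	/* struct dm_crypt_request follows the crypto request, padded for alignment */
	size_t dmreq_start = ALIGN_UP(req_size,
				      __alignof__(struct dm_crypt_request_sketch));
	/* the IV follows dm_crypt_request, padded to the cipher's alignment */
	size_t iv_offset = ALIGN_UP(dmreq_start + sizeof(struct dm_crypt_request_sketch),
				    cipher_align);

	printf("dmreq at %zu, IV at %zu, per-request total %zu bytes\n",
	       dmreq_start, iv_offset, iv_offset + iv_size);
	return 0;
}
```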
ef43aa38 MB |
224 | #define MIN_IOS 64 |
225 | #define MAX_TAG_SIZE 480 | |
226 | #define POOL_ENTRY_SIZE 512 | |
1da177e4 | 227 | |
5059353d MP |
228 | static DEFINE_SPINLOCK(dm_crypt_clients_lock); |
229 | static unsigned dm_crypt_clients_n = 0; | |
230 | static volatile unsigned long dm_crypt_pages_per_client; | |
231 | #define DM_CRYPT_MEMORY_PERCENT 2 | |
a8affc03 | 232 | #define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16) |
5059353d | 233 | |
028867ac | 234 | static void clone_init(struct dm_crypt_io *, struct bio *); |
395b167c | 235 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); |
ef43aa38 MB |
236 | static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, |
237 | struct scatterlist *sg); | |
027581f3 | 238 | |
3fd53533 YY |
239 | static bool crypt_integrity_aead(struct crypt_config *cc); |
240 | ||
c0297721 | 241 | /* |
86f917ad | 242 | * Use this to access cipher attributes that are independent of the key. |
c0297721 | 243 | */ |
bbdb23b5 | 244 | static struct crypto_skcipher *any_tfm(struct crypt_config *cc) |
c0297721 | 245 | { |
ef43aa38 MB |
246 | return cc->cipher_tfm.tfms[0]; |
247 | } | |
248 | ||
249 | static struct crypto_aead *any_tfm_aead(struct crypt_config *cc) | |
250 | { | |
251 | return cc->cipher_tfm.tfms_aead[0]; | |
c0297721 AK |
252 | } |
253 | ||
1da177e4 LT |
254 | /* |
255 | * Different IV generation algorithms: | |
256 | * | |
3c164bd8 | 257 | * plain: the initial vector is the 32-bit little-endian version of the sector |
3a4fa0a2 | 258 | * number, padded with zeros if necessary. |
1da177e4 | 259 | * |
61afef61 MB |
260 | * plain64: the initial vector is the 64-bit little-endian version of the sector |
261 | * number, padded with zeros if necessary. | |
262 | * | |
7e3fd855 MB |
263 | * plain64be: the initial vector is the 64-bit big-endian version of the sector |
264 | * number, padded with zeros if necessary. | |
265 | * | |
3c164bd8 RS |
266 | * essiv: "encrypted sector|salt initial vector", the sector number is |
267 | * encrypted with the bulk cipher using a salt as key. The salt | |
268 | * should be derived from the bulk cipher's key via hashing. | |
1da177e4 | 269 | * |
48527fa7 RS |
270 | * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 |
271 | * (needed for LRW-32-AES and possibly other narrow block modes) |
272 | * | |
46b47730 LN |
273 | * null: the initial vector is always zero. Provides compatibility with |
274 | * obsolete loop_fish2 devices. Do not use for new devices. | |
275 | * | |
34745785 MB |
276 | * lmk: Compatible implementation of the block chaining mode used |
277 | * by the Loop-AES block device encryption system | |
278 | * designed by Jari Ruusu. See http://loop-aes.sourceforge.net/ | |
279 | * It operates on full 512 byte sectors and uses CBC | |
280 | * with an IV derived from the sector number, the data and | |
281 | * optionally extra IV seed. | |
282 | * This means that after decryption the first block | |
283 | * of sector must be tweaked according to decrypted data. | |
284 | * Loop-AES can use three encryption schemes: | |
285 | * version 1: is plain aes-cbc mode | |
286 | * version 2: uses 64 multikey scheme with lmk IV generator | |
287 | * version 3: the same as version 2 with additional IV seed | |
288 | * (it uses 65 keys, last key is used as IV seed) | |
289 | * | |
ed04d981 MB |
290 | * tcw: Compatible implementation of the block chaining mode used |
291 | * by the TrueCrypt device encryption system (prior to version 4.1). | |
e44f23b3 | 292 | * For more info see: https://gitlab.com/cryptsetup/cryptsetup/wikis/TrueCryptOnDiskFormat |
ed04d981 MB |
293 | * It operates on full 512 byte sectors and uses CBC |
294 | * with an IV derived from the initial key and the sector number. |
295 | * In addition, a whitening value is applied to every sector; the whitening |
296 | * is calculated from the initial key and the sector number, mixed using CRC32. |
297 | * Note that this encryption scheme is vulnerable to watermarking attacks |
298 | * and should only be used to access old compatible containers. |
b9411d73 MB |
299 | * |
300 | * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode) | |
301 | * The IV is the encrypted little-endian byte offset (encrypted with the |
302 | * same key and cipher as the volume). |
bbb16584 MB |
303 | * |
304 | * elephant: The extended version of eboiv with additional Elephant diffuser | |
305 | * used with Bitlocker CBC mode. | |
306 | * This mode was used in older Windows systems | |
6f3bc22b | 307 | * https://download.microsoft.com/download/0/2/3/0238acaf-d3bf-4a6d-b3d6-0a0be4bbb36e/bitlockercipher200608.pdf |
1da177e4 LT |
308 | */ |
309 | ||
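The first three generators below are pure byte-order plumbing. A self-contained userspace sketch of plain, plain64 and plain64be for an assumed 16-byte IV (the put_* helpers stand in for cpu_to_le32/cpu_to_le64/cpu_to_be64):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IV_SIZE 16   /* assumed cipher IV size, e.g. AES-CBC */

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v; p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16); p[3] = (uint8_t)(v >> 24);
}

static void put_le64(uint8_t *p, uint64_t v)
{
	put_le32(p, (uint32_t)v);
	put_le32(p + 4, (uint32_t)(v >> 32));
}

static void put_be64(uint8_t *p, uint64_t v)
{
	for (int i = 7; i >= 0; i--) { p[i] = (uint8_t)v; v >>= 8; }
}

/* plain: 32-bit little-endian sector number, rest zero-padded */
static void iv_plain(uint8_t *iv, uint64_t sector)
{
	memset(iv, 0, IV_SIZE);
	put_le32(iv, (uint32_t)(sector & 0xffffffff));
}

/* plain64: 64-bit little-endian sector number, rest zero-padded */
static void iv_plain64(uint8_t *iv, uint64_t sector)
{
	memset(iv, 0, IV_SIZE);
	put_le64(iv, sector);
}

/* plain64be: 64-bit big-endian sector number in the *last* 8 bytes */
static void iv_plain64be(uint8_t *iv, uint64_t sector)
{
	memset(iv, 0, IV_SIZE);
	put_be64(iv + IV_SIZE - sizeof(uint64_t), sector);
}

int main(void)
{
	void (*gen[3])(uint8_t *, uint64_t) = { iv_plain, iv_plain64, iv_plain64be };
	const char *name[3] = { "plain", "plain64", "plain64be" };
	uint8_t iv[IV_SIZE];

	for (int g = 0; g < 3; g++) {
		gen[g](iv, 0x0123456789abcdefULL);
		printf("%-9s ", name[g]);
		for (int i = 0; i < IV_SIZE; i++)
			printf("%02x", iv[i]);
		printf("\n");
	}
	return 0;
}
```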
2dc5327d MB |
310 | static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, |
311 | struct dm_crypt_request *dmreq) | |
1da177e4 LT |
312 | { |
313 | memset(iv, 0, cc->iv_size); | |
283a8328 | 314 | *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); |
1da177e4 LT |
315 | |
316 | return 0; | |
317 | } | |
318 | ||
61afef61 | 319 | static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, |
2dc5327d | 320 | struct dm_crypt_request *dmreq) |
61afef61 MB |
321 | { |
322 | memset(iv, 0, cc->iv_size); | |
283a8328 | 323 | *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); |
61afef61 MB |
324 | |
325 | return 0; | |
326 | } | |
327 | ||
7e3fd855 MB |
328 | static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv, |
329 | struct dm_crypt_request *dmreq) | |
330 | { | |
331 | memset(iv, 0, cc->iv_size); | |
332 | /* iv_size is at least sizeof(u64); usually it is 16 bytes */ |
333 | *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector); | |
334 | ||
335 | return 0; | |
336 | } | |
337 | ||
2dc5327d MB |
338 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, |
339 | struct dm_crypt_request *dmreq) | |
1da177e4 | 340 | { |
a1a262b6 AB |
341 | /* |
342 | * ESSIV encryption of the IV is now handled by the crypto API, | |
343 | * so just pass the plain sector number here. | |
344 | */ | |
1da177e4 | 345 | memset(iv, 0, cc->iv_size); |
283a8328 | 346 | *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); |
c0297721 | 347 | |
1da177e4 LT |
348 | return 0; |
349 | } | |
350 | ||
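crypt_iv_essiv_gen() only emits the plain sector number because the crypto API's essiv() template now performs the actual encryption. The construction itself (salt = hash(key), IV = E_salt(sector)) is easy to sketch in userspace. An illustrative version using OpenSSL's legacy one-shot helpers as a stand-in for the kernel crypto API; fixing SHA-256 as the hash and AES-256 as the block cipher is an assumption for the example only:

```c
/* Build with: cc essiv.c -lcrypto (uses OpenSSL's legacy low-level API) */
#include <stdint.h>
#include <stdio.h>
#include <openssl/aes.h>
#include <openssl/sha.h>

/* ESSIV sketch: salt = SHA-256(key); IV = AES-256_salt(le64(sector), zero-padded) */
static int essiv_iv(uint8_t iv[16], const uint8_t *key, size_t key_len, uint64_t sector)
{
	uint8_t salt[SHA256_DIGEST_LENGTH];
	uint8_t block[16] = { 0 };
	AES_KEY aes;

	SHA256(key, key_len, salt);               /* salt derived from the bulk key */

	for (int i = 0; i < 8; i++)               /* little-endian sector number */
		block[i] = (uint8_t)(sector >> (8 * i));

	if (AES_set_encrypt_key(salt, 256, &aes)) /* 32-byte salt used as AES-256 key */
		return -1;
	AES_encrypt(block, iv, &aes);
	return 0;
}

int main(void)
{
	uint8_t key[32] = { 0x42 }, iv[16];

	if (essiv_iv(iv, key, sizeof(key), 12345))
		return 1;
	for (int i = 0; i < 16; i++)
		printf("%02x", iv[i]);
	printf("\n");
	return 0;
}
```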
48527fa7 RS |
351 | static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, |
352 | const char *opts) | |
353 | { | |
4ea9471f MB |
354 | unsigned bs; |
355 | int log; | |
356 | ||
3fd53533 | 357 | if (crypt_integrity_aead(cc)) |
4ea9471f MB |
358 | bs = crypto_aead_blocksize(any_tfm_aead(cc)); |
359 | else | |
360 | bs = crypto_skcipher_blocksize(any_tfm(cc)); | |
361 | log = ilog2(bs); | |
48527fa7 RS |
362 | |
363 | /* we need to calculate how far we must shift the sector count | |
364 | * to get the cipher block count; we use this shift in _gen */ |
365 | ||
366 | if (1 << log != bs) { | |
367 | ti->error = "cypher blocksize is not a power of 2"; | |
368 | return -EINVAL; | |
369 | } | |
370 | ||
371 | if (log > 9) { | |
372 | ti->error = "cypher blocksize is > 512"; | |
373 | return -EINVAL; | |
374 | } | |
375 | ||
60473592 | 376 | cc->iv_gen_private.benbi.shift = 9 - log; |
48527fa7 RS |
377 | |
378 | return 0; | |
379 | } | |
380 | ||
381 | static void crypt_iv_benbi_dtr(struct crypt_config *cc) | |
382 | { | |
48527fa7 RS |
383 | } |
384 | ||
2dc5327d MB |
385 | static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, |
386 | struct dm_crypt_request *dmreq) | |
48527fa7 | 387 | { |
79066ad3 HX |
388 | __be64 val; |
389 | ||
48527fa7 | 390 | memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ |
79066ad3 | 391 | |
2dc5327d | 392 | val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1); |
79066ad3 | 393 | put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); |
48527fa7 | 394 | |
1da177e4 LT |
395 | return 0; |
396 | } | |
397 | ||
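crypt_iv_benbi_ctr()/crypt_iv_benbi_gen() boil down to shift = 9 - log2(blocksize) and a big-endian narrow-block count (sector << shift) + 1 placed in the last 8 bytes of the IV. A self-contained sketch, assuming a 16-byte cipher block (so shift = 5, i.e. 32 narrow blocks per 512-byte sector):

```c
#include <stdint.h>
#include <stdio.h>

/* benbi: big-endian narrow-block count, starting at 1 */
static int benbi_iv(uint8_t *iv, unsigned iv_size, uint64_t sector, unsigned block_size)
{
	unsigned log = 0;

	while ((1u << log) < block_size)
		log++;
	if ((1u << log) != block_size || log > 9)
		return -1;           /* not a power of 2, or block > 512 bytes */

	uint64_t count = (sector << (9 - log)) + 1;

	for (unsigned i = 0; i < iv_size - 8; i++)
		iv[i] = 0;           /* leading bytes cleared */
	for (int i = 0; i < 8; i++)  /* big-endian count in the last 8 bytes */
		iv[iv_size - 1 - i] = (uint8_t)(count >> (8 * i));
	return 0;
}

int main(void)
{
	uint8_t iv[16];

	if (benbi_iv(iv, sizeof(iv), 3, 16))   /* sector 3, 16-byte blocks */
		return 1;
	for (int i = 0; i < 16; i++)
		printf("%02x", iv[i]);
	printf("\n");                          /* ends in 61: 3*32 + 1 = 97 = 0x61 */
	return 0;
}
```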
2dc5327d MB |
398 | static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, |
399 | struct dm_crypt_request *dmreq) | |
46b47730 LN |
400 | { |
401 | memset(iv, 0, cc->iv_size); | |
402 | ||
403 | return 0; | |
404 | } | |
405 | ||
34745785 MB |
406 | static void crypt_iv_lmk_dtr(struct crypt_config *cc) |
407 | { | |
408 | struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; | |
409 | ||
410 | if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm)) | |
411 | crypto_free_shash(lmk->hash_tfm); | |
412 | lmk->hash_tfm = NULL; | |
413 | ||
453431a5 | 414 | kfree_sensitive(lmk->seed); |
34745785 MB |
415 | lmk->seed = NULL; |
416 | } | |
417 | ||
418 | static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, | |
419 | const char *opts) | |
420 | { | |
421 | struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; | |
422 | ||
8f0009a2 MB |
423 | if (cc->sector_size != (1 << SECTOR_SHIFT)) { |
424 | ti->error = "Unsupported sector size for LMK"; | |
425 | return -EINVAL; | |
426 | } | |
427 | ||
cd746938 MP |
428 | lmk->hash_tfm = crypto_alloc_shash("md5", 0, |
429 | CRYPTO_ALG_ALLOCATES_MEMORY); | |
34745785 MB |
430 | if (IS_ERR(lmk->hash_tfm)) { |
431 | ti->error = "Error initializing LMK hash"; | |
432 | return PTR_ERR(lmk->hash_tfm); | |
433 | } | |
434 | ||
435 | /* No seed in LMK version 2 */ | |
436 | if (cc->key_parts == cc->tfms_count) { | |
437 | lmk->seed = NULL; | |
438 | return 0; | |
439 | } | |
440 | ||
441 | lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL); | |
442 | if (!lmk->seed) { | |
443 | crypt_iv_lmk_dtr(cc); | |
444 | ti->error = "Error kmallocing seed storage in LMK"; | |
445 | return -ENOMEM; | |
446 | } | |
447 | ||
448 | return 0; | |
449 | } | |
450 | ||
451 | static int crypt_iv_lmk_init(struct crypt_config *cc) | |
452 | { | |
453 | struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; | |
454 | int subkey_size = cc->key_size / cc->key_parts; | |
455 | ||
456 | /* LMK seed is stored at the position of the (LMK_KEYS + 1)th key */ |
457 | if (lmk->seed) | |
458 | memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size), | |
459 | crypto_shash_digestsize(lmk->hash_tfm)); | |
460 | ||
461 | return 0; | |
462 | } | |
463 | ||
464 | static int crypt_iv_lmk_wipe(struct crypt_config *cc) | |
465 | { | |
466 | struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; | |
467 | ||
468 | if (lmk->seed) | |
469 | memset(lmk->seed, 0, LMK_SEED_SIZE); | |
470 | ||
471 | return 0; | |
472 | } | |
473 | ||
474 | static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, | |
475 | struct dm_crypt_request *dmreq, | |
476 | u8 *data) | |
477 | { | |
478 | struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; | |
b6106265 | 479 | SHASH_DESC_ON_STACK(desc, lmk->hash_tfm); |
34745785 | 480 | struct md5_state md5state; |
da31a078 | 481 | __le32 buf[4]; |
34745785 MB |
482 | int i, r; |
483 | ||
b6106265 | 484 | desc->tfm = lmk->hash_tfm; |
34745785 | 485 | |
b6106265 | 486 | r = crypto_shash_init(desc); |
34745785 MB |
487 | if (r) |
488 | return r; | |
489 | ||
490 | if (lmk->seed) { | |
b6106265 | 491 | r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE); |
34745785 MB |
492 | if (r) |
493 | return r; | |
494 | } | |
495 | ||
496 | /* Sector is always 512B, block size 16, add data of blocks 1-31 */ | |
b6106265 | 497 | r = crypto_shash_update(desc, data + 16, 16 * 31); |
34745785 MB |
498 | if (r) |
499 | return r; | |
500 | ||
501 | /* Sector is cropped to 56 bits here */ | |
502 | buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF); | |
503 | buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); | |
504 | buf[2] = cpu_to_le32(4024); | |
505 | buf[3] = 0; | |
b6106265 | 506 | r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf)); |
34745785 MB |
507 | if (r) |
508 | return r; | |
509 | ||
510 | /* No MD5 padding here */ | |
b6106265 | 511 | r = crypto_shash_export(desc, &md5state); |
34745785 MB |
512 | if (r) |
513 | return r; | |
514 | ||
515 | for (i = 0; i < MD5_HASH_WORDS; i++) | |
516 | __cpu_to_le32s(&md5state.hash[i]); | |
517 | memcpy(iv, &md5state.hash, cc->iv_size); | |
518 | ||
519 | return 0; | |
520 | } | |
521 | ||
522 | static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, | |
523 | struct dm_crypt_request *dmreq) | |
524 | { | |
ef43aa38 | 525 | struct scatterlist *sg; |
34745785 MB |
526 | u8 *src; |
527 | int r = 0; | |
528 | ||
529 | if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { | |
ef43aa38 MB |
530 | sg = crypt_get_sg_data(cc, dmreq->sg_in); |
531 | src = kmap_atomic(sg_page(sg)); | |
532 | r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); | |
c2e022cb | 533 | kunmap_atomic(src); |
34745785 MB |
534 | } else |
535 | memset(iv, 0, cc->iv_size); | |
536 | ||
537 | return r; | |
538 | } | |
539 | ||
540 | static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, | |
541 | struct dm_crypt_request *dmreq) | |
542 | { | |
ef43aa38 | 543 | struct scatterlist *sg; |
34745785 MB |
544 | u8 *dst; |
545 | int r; | |
546 | ||
547 | if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) | |
548 | return 0; | |
549 | ||
ef43aa38 MB |
550 | sg = crypt_get_sg_data(cc, dmreq->sg_out); |
551 | dst = kmap_atomic(sg_page(sg)); | |
552 | r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset); | |
34745785 MB |
553 | |
554 | /* Tweak the first block of plaintext sector */ | |
555 | if (!r) | |
ef43aa38 | 556 | crypto_xor(dst + sg->offset, iv, cc->iv_size); |
34745785 | 557 | |
c2e022cb | 558 | kunmap_atomic(dst); |
34745785 MB |
559 | return r; |
560 | } | |
561 | ||
ed04d981 MB |
562 | static void crypt_iv_tcw_dtr(struct crypt_config *cc) |
563 | { | |
564 | struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; | |
565 | ||
453431a5 | 566 | kfree_sensitive(tcw->iv_seed); |
ed04d981 | 567 | tcw->iv_seed = NULL; |
453431a5 | 568 | kfree_sensitive(tcw->whitening); |
ed04d981 MB |
569 | tcw->whitening = NULL; |
570 | ||
571 | if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm)) | |
572 | crypto_free_shash(tcw->crc32_tfm); | |
573 | tcw->crc32_tfm = NULL; | |
574 | } | |
575 | ||
576 | static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, | |
577 | const char *opts) | |
578 | { | |
579 | struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; | |
580 | ||
8f0009a2 MB |
581 | if (cc->sector_size != (1 << SECTOR_SHIFT)) { |
582 | ti->error = "Unsupported sector size for TCW"; | |
583 | return -EINVAL; | |
584 | } | |
585 | ||
ed04d981 MB |
586 | if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) { |
587 | ti->error = "Wrong key size for TCW"; | |
588 | return -EINVAL; | |
589 | } | |
590 | ||
cd746938 MP |
591 | tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, |
592 | CRYPTO_ALG_ALLOCATES_MEMORY); | |
ed04d981 MB |
593 | if (IS_ERR(tcw->crc32_tfm)) { |
594 | ti->error = "Error initializing CRC32 in TCW"; | |
595 | return PTR_ERR(tcw->crc32_tfm); | |
596 | } | |
597 | ||
598 | tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL); | |
599 | tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL); | |
600 | if (!tcw->iv_seed || !tcw->whitening) { | |
601 | crypt_iv_tcw_dtr(cc); | |
602 | ti->error = "Error allocating seed storage in TCW"; | |
603 | return -ENOMEM; | |
604 | } | |
605 | ||
606 | return 0; | |
607 | } | |
608 | ||
609 | static int crypt_iv_tcw_init(struct crypt_config *cc) | |
610 | { | |
611 | struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; | |
612 | int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE; | |
613 | ||
614 | memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size); | |
615 | memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size], | |
616 | TCW_WHITENING_SIZE); | |
617 | ||
618 | return 0; | |
619 | } | |
620 | ||
621 | static int crypt_iv_tcw_wipe(struct crypt_config *cc) | |
622 | { | |
623 | struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; | |
624 | ||
625 | memset(tcw->iv_seed, 0, cc->iv_size); | |
626 | memset(tcw->whitening, 0, TCW_WHITENING_SIZE); | |
627 | ||
628 | return 0; | |
629 | } | |
630 | ||
631 | static int crypt_iv_tcw_whitening(struct crypt_config *cc, | |
632 | struct dm_crypt_request *dmreq, | |
633 | u8 *data) | |
634 | { | |
635 | struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; | |
350b5393 | 636 | __le64 sector = cpu_to_le64(dmreq->iv_sector); |
ed04d981 | 637 | u8 buf[TCW_WHITENING_SIZE]; |
b6106265 | 638 | SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm); |
ed04d981 MB |
639 | int i, r; |
640 | ||
641 | /* xor whitening with sector number */ | |
45fe93df AB |
642 | crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8); |
643 | crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8); |
ed04d981 MB |
644 | |
645 | /* calculate crc32 for every 32-bit part and xor it */ |
b6106265 | 646 | desc->tfm = tcw->crc32_tfm; |
ed04d981 | 647 | for (i = 0; i < 4; i++) { |
b6106265 | 648 | r = crypto_shash_init(desc); |
ed04d981 MB |
649 | if (r) |
650 | goto out; | |
b6106265 | 651 | r = crypto_shash_update(desc, &buf[i * 4], 4); |
ed04d981 MB |
652 | if (r) |
653 | goto out; | |
b6106265 | 654 | r = crypto_shash_final(desc, &buf[i * 4]); |
ed04d981 MB |
655 | if (r) |
656 | goto out; | |
657 | } | |
658 | crypto_xor(&buf[0], &buf[12], 4); | |
659 | crypto_xor(&buf[4], &buf[8], 4); | |
660 | ||
661 | /* apply whitening (8 bytes) to whole sector */ | |
662 | for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++) | |
663 | crypto_xor(data + i * 8, buf, 8); | |
664 | out: | |
1a71d6ff | 665 | memzero_explicit(buf, sizeof(buf)); |
ed04d981 MB |
666 | return r; |
667 | } | |
668 | ||
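crypt_iv_tcw_whitening() derives 16 whitening bytes from the seed and the sector number, folds them to 8, and xors those 8 bytes across the whole 512-byte sector. A userspace sketch of the same steps; the textbook reflected CRC-32 (polynomial 0xEDB88320, zero seed, little-endian output) stands in for the kernel's "crc32" shash, whose seeding conventions should match but are stated here as an assumption. Because the whitening is a pure xor, applying it twice is a no-op, which the harness checks.

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define SECTOR_SIZE 512
#define TCW_WHITENING_SIZE 16

/* Textbook reflected CRC-32, zero seed, no final inversion. */
static uint32_t crc32_buf(uint32_t crc, const uint8_t *p, size_t n)
{
	while (n--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1)));
	}
	return crc;
}

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v; p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16); p[3] = (uint8_t)(v >> 24);
}

/* Mirrors crypt_iv_tcw_whitening(): derive, crc, fold, xor over the sector. */
static void tcw_whiten(uint8_t *data, const uint8_t whitening[TCW_WHITENING_SIZE],
		       uint64_t sector)
{
	uint8_t sec[8], buf[TCW_WHITENING_SIZE];

	for (int i = 0; i < 8; i++)
		sec[i] = (uint8_t)(sector >> (8 * i));   /* le64 sector number */

	/* xor whitening with the sector number (twice across 16 bytes) */
	for (int i = 0; i < TCW_WHITENING_SIZE; i++)
		buf[i] = whitening[i] ^ sec[i % 8];

	/* CRC32 each 32-bit part in place */
	for (int i = 0; i < 4; i++)
		put_le32(&buf[i * 4], crc32_buf(0, &buf[i * 4], 4));

	/* fold 16 bytes down to 8 */
	for (int i = 0; i < 4; i++) {
		buf[i] ^= buf[12 + i];
		buf[4 + i] ^= buf[8 + i];
	}

	/* apply the 8-byte whitening to the whole sector */
	for (int i = 0; i < SECTOR_SIZE; i++)
		data[i] ^= buf[i % 8];
}

int main(void)
{
	uint8_t data[SECTOR_SIZE] = { 0 }, seed[TCW_WHITENING_SIZE] = { 1 };

	tcw_whiten(data, seed, 42);
	tcw_whiten(data, seed, 42);   /* xor is its own inverse */
	printf("round-trip ok: %d\n", data[0] == 0);
	return 0;
}
```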
669 | static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, | |
670 | struct dm_crypt_request *dmreq) | |
671 | { | |
ef43aa38 | 672 | struct scatterlist *sg; |
ed04d981 | 673 | struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; |
350b5393 | 674 | __le64 sector = cpu_to_le64(dmreq->iv_sector); |
ed04d981 MB |
675 | u8 *src; |
676 | int r = 0; | |
677 | ||
678 | /* Remove whitening from ciphertext */ | |
679 | if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { | |
ef43aa38 MB |
680 | sg = crypt_get_sg_data(cc, dmreq->sg_in); |
681 | src = kmap_atomic(sg_page(sg)); | |
682 | r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset); | |
ed04d981 MB |
683 | kunmap_atomic(src); |
684 | } | |
685 | ||
686 | /* Calculate IV */ | |
45fe93df | 687 | crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8); |
ed04d981 | 688 | if (cc->iv_size > 8) |
45fe93df AB |
689 | crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector, |
690 | cc->iv_size - 8); | |
ed04d981 MB |
691 | |
692 | return r; | |
693 | } | |
694 | ||
695 | static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, | |
696 | struct dm_crypt_request *dmreq) | |
697 | { | |
ef43aa38 | 698 | struct scatterlist *sg; |
ed04d981 MB |
699 | u8 *dst; |
700 | int r; | |
701 | ||
702 | if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) | |
703 | return 0; | |
704 | ||
705 | /* Apply whitening on ciphertext */ | |
ef43aa38 MB |
706 | sg = crypt_get_sg_data(cc, dmreq->sg_out); |
707 | dst = kmap_atomic(sg_page(sg)); | |
708 | r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset); | |
ed04d981 MB |
709 | kunmap_atomic(dst); |
710 | ||
711 | return r; | |
712 | } | |
713 | ||
ef43aa38 MB |
714 | static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv, |
715 | struct dm_crypt_request *dmreq) | |
716 | { | |
717 | /* Used only for writes; there must be additional space to store the IV */ |
718 | get_random_bytes(iv, cc->iv_size); | |
719 | return 0; | |
720 | } | |
721 | ||
b9411d73 MB |
722 | static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti, |
723 | const char *opts) | |
724 | { | |
3fd53533 | 725 | if (crypt_integrity_aead(cc)) { |
39d13a1a AB |
726 | ti->error = "AEAD transforms not supported for EBOIV"; |
727 | return -EINVAL; | |
b9411d73 MB |
728 | } |
729 | ||
39d13a1a | 730 | if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) { |
b9411d73 MB |
731 | ti->error = "Block size of EBOIV cipher does " |
732 | "not match IV size of block cipher"; | |
b9411d73 MB |
733 | return -EINVAL; |
734 | } | |
735 | ||
b9411d73 MB |
736 | return 0; |
737 | } | |
738 | ||
39d13a1a AB |
739 | static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv, |
740 | struct dm_crypt_request *dmreq) | |
b9411d73 | 741 | { |
39d13a1a AB |
742 | u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64)); |
743 | struct skcipher_request *req; | |
744 | struct scatterlist src, dst; | |
7785a9e4 | 745 | DECLARE_CRYPTO_WAIT(wait); |
b9411d73 MB |
746 | int err; |
747 | ||
9402e959 | 748 | req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO); |
39d13a1a AB |
749 | if (!req) |
750 | return -ENOMEM; | |
b9411d73 | 751 | |
39d13a1a AB |
752 | memset(buf, 0, cc->iv_size); |
753 | *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size); | |
b9411d73 | 754 | |
39d13a1a AB |
755 | sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size); |
756 | sg_init_one(&dst, iv, cc->iv_size); | |
757 | skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf); | |
758 | skcipher_request_set_callback(req, 0, crypto_req_done, &wait); | |
759 | err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); | |
760 | skcipher_request_free(req); | |
b9411d73 | 761 | |
39d13a1a | 762 | return err; |
b9411d73 MB |
763 | } |
764 | ||
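Because crypt_iv_eboiv_gen() CBC-encrypts one block of zeroes with the little-endian byte offset (sector * sector_size) as the chaining IV, the result collapses to a single raw block encryption of the offset: C = E_K(0 xor IV) = E_K(offset). An illustrative userspace equivalent, assuming a cbc(aes) volume with a 256-bit key and using OpenSSL's legacy AES API as a stand-in for the kernel crypto API:

```c
/* Build with: cc eboiv.c -lcrypto */
#include <stdint.h>
#include <stdio.h>
#include <openssl/aes.h>

/* EBOIV sketch: IV = E_key(le64(sector * sector_size), zero-padded to a block) */
static int eboiv(uint8_t iv[16], const uint8_t key[32],
		 uint64_t sector, unsigned sector_size)
{
	uint8_t block[16] = { 0 };
	uint64_t off = sector * sector_size;
	AES_KEY aes;

	for (int i = 0; i < 8; i++)
		block[i] = (uint8_t)(off >> (8 * i));  /* little-endian byte offset */

	if (AES_set_encrypt_key(key, 256, &aes))
		return -1;
	AES_encrypt(block, iv, &aes);
	return 0;
}

int main(void)
{
	uint8_t key[32] = { 0 }, iv[16];

	if (eboiv(iv, key, 7, 512))
		return 1;
	for (int i = 0; i < 16; i++)
		printf("%02x", iv[i]);
	printf("\n");
	return 0;
}
```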
bbb16584 MB |
765 | static void crypt_iv_elephant_dtr(struct crypt_config *cc) |
766 | { | |
767 | struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; | |
768 | ||
769 | crypto_free_skcipher(elephant->tfm); | |
770 | elephant->tfm = NULL; | |
771 | } | |
772 | ||
773 | static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti, | |
774 | const char *opts) | |
775 | { | |
776 | struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; | |
777 | int r; | |
778 | ||
cd746938 MP |
779 | elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, |
780 | CRYPTO_ALG_ALLOCATES_MEMORY); | |
bbb16584 MB |
781 | if (IS_ERR(elephant->tfm)) { |
782 | r = PTR_ERR(elephant->tfm); | |
783 | elephant->tfm = NULL; | |
784 | return r; | |
785 | } | |
786 | ||
787 | r = crypt_iv_eboiv_ctr(cc, ti, NULL); | |
788 | if (r) | |
789 | crypt_iv_elephant_dtr(cc); | |
790 | return r; | |
791 | } | |
792 | ||
793 | static void diffuser_disk_to_cpu(u32 *d, size_t n) | |
794 | { | |
795 | #ifndef __LITTLE_ENDIAN | |
796 | int i; | |
797 | ||
798 | for (i = 0; i < n; i++) | |
799 | d[i] = le32_to_cpu((__le32)d[i]); | |
800 | #endif | |
801 | } | |
802 | ||
803 | static void diffuser_cpu_to_disk(__le32 *d, size_t n) | |
804 | { | |
805 | #ifndef __LITTLE_ENDIAN | |
806 | int i; | |
807 | ||
808 | for (i = 0; i < n; i++) | |
809 | d[i] = cpu_to_le32((u32)d[i]); | |
810 | #endif | |
811 | } | |
812 | ||
813 | static void diffuser_a_decrypt(u32 *d, size_t n) | |
814 | { | |
815 | int i, i1, i2, i3; | |
816 | ||
817 | for (i = 0; i < 5; i++) { | |
818 | i1 = 0; | |
819 | i2 = n - 2; | |
820 | i3 = n - 5; | |
821 | ||
822 | while (i1 < (n - 1)) { | |
823 | d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23); | |
824 | i1++; i2++; i3++; | |
825 | ||
826 | if (i3 >= n) | |
827 | i3 -= n; | |
828 | ||
829 | d[i1] += d[i2] ^ d[i3]; | |
830 | i1++; i2++; i3++; | |
831 | ||
832 | if (i2 >= n) | |
833 | i2 -= n; | |
834 | ||
835 | d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19); | |
836 | i1++; i2++; i3++; | |
837 | ||
838 | d[i1] += d[i2] ^ d[i3]; | |
839 | i1++; i2++; i3++; | |
840 | } | |
841 | } | |
842 | } | |
843 | ||
844 | static void diffuser_a_encrypt(u32 *d, size_t n) | |
845 | { | |
846 | int i, i1, i2, i3; | |
847 | ||
848 | for (i = 0; i < 5; i++) { | |
849 | i1 = n - 1; | |
850 | i2 = n - 2 - 1; | |
851 | i3 = n - 5 - 1; | |
852 | ||
853 | while (i1 > 0) { | |
854 | d[i1] -= d[i2] ^ d[i3]; | |
855 | i1--; i2--; i3--; | |
856 | ||
857 | d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19); | |
858 | i1--; i2--; i3--; | |
859 | ||
860 | if (i2 < 0) | |
861 | i2 += n; | |
862 | ||
863 | d[i1] -= d[i2] ^ d[i3]; | |
864 | i1--; i2--; i3--; | |
865 | ||
866 | if (i3 < 0) | |
867 | i3 += n; | |
868 | ||
869 | d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23); | |
870 | i1--; i2--; i3--; | |
871 | } | |
872 | } | |
873 | } | |
874 | ||
875 | static void diffuser_b_decrypt(u32 *d, size_t n) | |
876 | { | |
877 | int i, i1, i2, i3; | |
878 | ||
879 | for (i = 0; i < 3; i++) { | |
880 | i1 = 0; | |
881 | i2 = 2; | |
882 | i3 = 5; | |
883 | ||
884 | while (i1 < (n - 1)) { | |
885 | d[i1] += d[i2] ^ d[i3]; | |
886 | i1++; i2++; i3++; | |
887 | ||
888 | d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22); | |
889 | i1++; i2++; i3++; | |
890 | ||
891 | if (i2 >= n) | |
892 | i2 -= n; | |
893 | ||
894 | d[i1] += d[i2] ^ d[i3]; | |
895 | i1++; i2++; i3++; | |
896 | ||
897 | if (i3 >= n) | |
898 | i3 -= n; | |
899 | ||
900 | d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7); | |
901 | i1++; i2++; i3++; | |
902 | } | |
903 | } | |
904 | } | |
905 | ||
906 | static void diffuser_b_encrypt(u32 *d, size_t n) | |
907 | { | |
908 | int i, i1, i2, i3; | |
909 | ||
910 | for (i = 0; i < 3; i++) { | |
911 | i1 = n - 1; | |
912 | i2 = 2 - 1; | |
913 | i3 = 5 - 1; | |
914 | ||
915 | while (i1 > 0) { | |
916 | d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7); | |
917 | i1--; i2--; i3--; | |
918 | ||
919 | if (i3 < 0) | |
920 | i3 += n; | |
921 | ||
922 | d[i1] -= d[i2] ^ d[i3]; | |
923 | i1--; i2--; i3--; | |
924 | ||
925 | if (i2 < 0) | |
926 | i2 += n; | |
927 | ||
928 | d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22); | |
929 | i1--; i2--; i3--; | |
930 | ||
931 | d[i1] -= d[i2] ^ d[i3]; | |
932 | i1--; i2--; i3--; | |
933 | } | |
934 | } | |
935 | } | |
936 | ||
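The four diffuser routines are mutual inverses over an array of 32-bit words (128 words per 512-byte sector); the write path applies diffuser A then B, and the read path undoes B then A. A quick userspace harness to check the round-trip property; it assumes the diffuser_a_*/diffuser_b_* functions above are pasted into the same file (they are pure C and need only the u32 typedef below):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t u32;   /* userspace stand-in for the kernel type */

/* Provided by the diffuser_* definitions above, compiled into this file. */
static void diffuser_a_decrypt(u32 *d, size_t n);
static void diffuser_a_encrypt(u32 *d, size_t n);
static void diffuser_b_decrypt(u32 *d, size_t n);
static void diffuser_b_encrypt(u32 *d, size_t n);

int main(void)
{
	u32 buf[128], orig[128];   /* one 512-byte sector as 32-bit words */

	for (int i = 0; i < 128; i++)
		buf[i] = orig[i] = 0x9e3779b9u * (u32)i;   /* arbitrary pattern */

	/* write path order: diffuser A, then diffuser B */
	diffuser_a_encrypt(buf, 128);
	diffuser_b_encrypt(buf, 128);
	/* read path order: undo B, then undo A */
	diffuser_b_decrypt(buf, 128);
	diffuser_a_decrypt(buf, 128);

	printf("round-trip ok: %d\n", memcmp(buf, orig, sizeof(buf)) == 0);
	return 0;
}
```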
937 | static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq) | |
938 | { | |
939 | struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; | |
940 | u8 *es, *ks, *data, *data2, *data_offset; | |
941 | struct skcipher_request *req; | |
942 | struct scatterlist *sg, *sg2, src, dst; | |
7785a9e4 | 943 | DECLARE_CRYPTO_WAIT(wait); |
bbb16584 MB |
944 | int i, r; |
945 | ||
946 | req = skcipher_request_alloc(elephant->tfm, GFP_NOIO); | |
947 | es = kzalloc(16, GFP_NOIO); /* Key for AES */ | |
948 | ks = kzalloc(32, GFP_NOIO); /* Elephant sector key */ | |
949 | ||
950 | if (!req || !es || !ks) { | |
951 | r = -ENOMEM; | |
952 | goto out; | |
953 | } | |
954 | ||
955 | *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size); | |
956 | ||
957 | /* E(Ks, e(s)) */ | |
958 | sg_init_one(&src, es, 16); | |
959 | sg_init_one(&dst, ks, 16); | |
960 | skcipher_request_set_crypt(req, &src, &dst, 16, NULL); | |
961 | skcipher_request_set_callback(req, 0, crypto_req_done, &wait); | |
962 | r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); | |
963 | if (r) | |
964 | goto out; | |
965 | ||
966 | /* E(Ks, e'(s)) */ | |
967 | es[15] = 0x80; | |
968 | sg_init_one(&dst, &ks[16], 16); | |
969 | r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); | |
970 | if (r) | |
971 | goto out; | |
972 | ||
973 | sg = crypt_get_sg_data(cc, dmreq->sg_out); | |
974 | data = kmap_atomic(sg_page(sg)); | |
975 | data_offset = data + sg->offset; | |
976 | ||
977 | /* Cannot modify original bio, copy to sg_out and apply Elephant to it */ | |
978 | if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { | |
979 | sg2 = crypt_get_sg_data(cc, dmreq->sg_in); | |
980 | data2 = kmap_atomic(sg_page(sg2)); | |
981 | memcpy(data_offset, data2 + sg2->offset, cc->sector_size); | |
982 | kunmap_atomic(data2); | |
983 | } | |
984 | ||
985 | if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { | |
986 | diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); | |
987 | diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); | |
988 | diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); | |
989 | diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); | |
990 | } | |
991 | ||
992 | for (i = 0; i < (cc->sector_size / 32); i++) | |
993 | crypto_xor(data_offset + i * 32, ks, 32); | |
994 | ||
995 | if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { | |
996 | diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); | |
997 | diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); | |
998 | diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); | |
999 | diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); | |
1000 | } | |
1001 | ||
1002 | kunmap_atomic(data); | |
1003 | out: | |
453431a5 WL |
1004 | kfree_sensitive(ks); |
1005 | kfree_sensitive(es); | |
bbb16584 MB |
1006 | skcipher_request_free(req); |
1007 | return r; | |
1008 | } | |
1009 | ||
1010 | static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv, | |
1011 | struct dm_crypt_request *dmreq) | |
1012 | { | |
1013 | int r; | |
1014 | ||
1015 | if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { | |
1016 | r = crypt_iv_elephant(cc, dmreq); | |
1017 | if (r) | |
1018 | return r; | |
1019 | } | |
1020 | ||
1021 | return crypt_iv_eboiv_gen(cc, iv, dmreq); | |
1022 | } | |
1023 | ||
1024 | static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv, | |
1025 | struct dm_crypt_request *dmreq) | |
1026 | { | |
1027 | if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) | |
1028 | return crypt_iv_elephant(cc, dmreq); | |
1029 | ||
1030 | return 0; | |
1031 | } | |
1032 | ||
1033 | static int crypt_iv_elephant_init(struct crypt_config *cc) | |
1034 | { | |
1035 | struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; | |
1036 | int key_offset = cc->key_size - cc->key_extra_size; | |
1037 | ||
1038 | return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size); | |
1039 | } | |
1040 | ||
1041 | static int crypt_iv_elephant_wipe(struct crypt_config *cc) | |
1042 | { | |
1043 | struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant; | |
1044 | u8 key[ELEPHANT_MAX_KEY_SIZE]; | |
1045 | ||
1046 | memset(key, 0, cc->key_extra_size); | |
1047 | return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size); | |
1048 | } | |
1049 | ||
1b1b58f5 | 1050 | static const struct crypt_iv_operations crypt_iv_plain_ops = { |
1da177e4 LT |
1051 | .generator = crypt_iv_plain_gen |
1052 | }; | |
1053 | ||
1b1b58f5 | 1054 | static const struct crypt_iv_operations crypt_iv_plain64_ops = { |
61afef61 MB |
1055 | .generator = crypt_iv_plain64_gen |
1056 | }; | |
1057 | ||
7e3fd855 MB |
1058 | static const struct crypt_iv_operations crypt_iv_plain64be_ops = { |
1059 | .generator = crypt_iv_plain64be_gen | |
1060 | }; | |
1061 | ||
1b1b58f5 | 1062 | static const struct crypt_iv_operations crypt_iv_essiv_ops = { |
1da177e4 LT |
1063 | .generator = crypt_iv_essiv_gen |
1064 | }; | |
1065 | ||
1b1b58f5 | 1066 | static const struct crypt_iv_operations crypt_iv_benbi_ops = { |
48527fa7 RS |
1067 | .ctr = crypt_iv_benbi_ctr, |
1068 | .dtr = crypt_iv_benbi_dtr, | |
1069 | .generator = crypt_iv_benbi_gen | |
1070 | }; | |
1da177e4 | 1071 | |
1b1b58f5 | 1072 | static const struct crypt_iv_operations crypt_iv_null_ops = { |
46b47730 LN |
1073 | .generator = crypt_iv_null_gen |
1074 | }; | |
1075 | ||
1b1b58f5 | 1076 | static const struct crypt_iv_operations crypt_iv_lmk_ops = { |
34745785 MB |
1077 | .ctr = crypt_iv_lmk_ctr, |
1078 | .dtr = crypt_iv_lmk_dtr, | |
1079 | .init = crypt_iv_lmk_init, | |
1080 | .wipe = crypt_iv_lmk_wipe, | |
1081 | .generator = crypt_iv_lmk_gen, | |
1082 | .post = crypt_iv_lmk_post | |
1083 | }; | |
1084 | ||
1b1b58f5 | 1085 | static const struct crypt_iv_operations crypt_iv_tcw_ops = { |
ed04d981 MB |
1086 | .ctr = crypt_iv_tcw_ctr, |
1087 | .dtr = crypt_iv_tcw_dtr, | |
1088 | .init = crypt_iv_tcw_init, | |
1089 | .wipe = crypt_iv_tcw_wipe, | |
1090 | .generator = crypt_iv_tcw_gen, | |
1091 | .post = crypt_iv_tcw_post | |
1092 | }; | |
1093 | ||
e8dc79d1 | 1094 | static const struct crypt_iv_operations crypt_iv_random_ops = { |
ef43aa38 MB |
1095 | .generator = crypt_iv_random_gen |
1096 | }; | |
1097 | ||
e8dc79d1 | 1098 | static const struct crypt_iv_operations crypt_iv_eboiv_ops = { |
b9411d73 | 1099 | .ctr = crypt_iv_eboiv_ctr, |
b9411d73 MB |
1100 | .generator = crypt_iv_eboiv_gen |
1101 | }; | |
1102 | ||
e8dc79d1 | 1103 | static const struct crypt_iv_operations crypt_iv_elephant_ops = { |
bbb16584 MB |
1104 | .ctr = crypt_iv_elephant_ctr, |
1105 | .dtr = crypt_iv_elephant_dtr, | |
1106 | .init = crypt_iv_elephant_init, | |
1107 | .wipe = crypt_iv_elephant_wipe, | |
1108 | .generator = crypt_iv_elephant_gen, | |
1109 | .post = crypt_iv_elephant_post | |
1110 | }; | |
1111 | ||
ef43aa38 MB |
1112 | /* |
1113 | * Integrity extensions | |
1114 | */ | |
1115 | static bool crypt_integrity_aead(struct crypt_config *cc) | |
1116 | { | |
1117 | return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); | |
1118 | } | |
1119 | ||
1120 | static bool crypt_integrity_hmac(struct crypt_config *cc) | |
1121 | { | |
33d2f09f | 1122 | return crypt_integrity_aead(cc) && cc->key_mac_size; |
ef43aa38 MB |
1123 | } |
1124 | ||
1125 | /* Get sg containing data */ | |
1126 | static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, | |
1127 | struct scatterlist *sg) | |
1128 | { | |
33d2f09f | 1129 | if (unlikely(crypt_integrity_aead(cc))) |
ef43aa38 MB |
1130 | return &sg[2]; |
1131 | ||
1132 | return sg; | |
1133 | } | |
1134 | ||
1135 | static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) | |
1136 | { | |
1137 | struct bio_integrity_payload *bip; | |
1138 | unsigned int tag_len; | |
1139 | int ret; | |
1140 | ||
1141 | if (!bio_sectors(bio) || !io->cc->on_disk_tag_size) | |
1142 | return 0; | |
1143 | ||
1144 | bip = bio_integrity_alloc(bio, GFP_NOIO, 1); | |
1145 | if (IS_ERR(bip)) | |
1146 | return PTR_ERR(bip); | |
1147 | ||
ff0c129d | 1148 | tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); |
ef43aa38 MB |
1149 | |
1150 | bip->bip_iter.bi_size = tag_len; | |
1151 | bip->bip_iter.bi_sector = io->cc->start + io->sector; | |
1152 | ||
ef43aa38 MB |
1153 | ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata), |
1154 | tag_len, offset_in_page(io->integrity_metadata)); | |
1155 | if (unlikely(ret != tag_len)) | |
1156 | return -ENOMEM; | |
1157 | ||
1158 | return 0; | |
1159 | } | |
1160 | ||
1161 | static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) | |
1162 | { | |
1163 | #ifdef CONFIG_BLK_DEV_INTEGRITY | |
1164 | struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk); | |
7a1cd723 | 1165 | struct mapped_device *md = dm_table_get_md(ti->table); |
ef43aa38 MB |
1166 | |
1167 | /* From now on we require an underlying device with our integrity profile */ |
1168 | if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) { | |
1169 | ti->error = "Integrity profile not supported."; | |
1170 | return -EINVAL; | |
1171 | } | |
1172 | ||
583fe747 MP |
1173 | if (bi->tag_size != cc->on_disk_tag_size || |
1174 | bi->tuple_size != cc->on_disk_tag_size) { | |
ef43aa38 MB |
1175 | ti->error = "Integrity profile tag size mismatch."; |
1176 | return -EINVAL; | |
1177 | } | |
583fe747 MP |
1178 | if (1 << bi->interval_exp != cc->sector_size) { |
1179 | ti->error = "Integrity profile sector size mismatch."; | |
1180 | return -EINVAL; | |
1181 | } | |
ef43aa38 | 1182 | |
33d2f09f | 1183 | if (crypt_integrity_aead(cc)) { |
ef43aa38 | 1184 | cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size; |
7a1cd723 | 1185 | DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md), |
ef43aa38 MB |
1186 | cc->integrity_tag_size, cc->integrity_iv_size); |
1187 | ||
1188 | if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) { | |
1189 | ti->error = "Integrity AEAD auth tag size is not supported."; | |
1190 | return -EINVAL; | |
1191 | } | |
1192 | } else if (cc->integrity_iv_size) | |
7a1cd723 | 1193 | DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md), |
ef43aa38 MB |
1194 | cc->integrity_iv_size); |
1195 | ||
1196 | if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) { | |
1197 | ti->error = "Not enough space for integrity tag in the profile."; | |
1198 | return -EINVAL; | |
1199 | } | |
1200 | ||
1201 | return 0; | |
1202 | #else | |
1203 | ti->error = "Integrity profile not supported."; | |
1204 | return -EINVAL; | |
1205 | #endif | |
1206 | } | |
1207 | ||
d469f841 MB |
1208 | static void crypt_convert_init(struct crypt_config *cc, |
1209 | struct convert_context *ctx, | |
1210 | struct bio *bio_out, struct bio *bio_in, | |
fcd369da | 1211 | sector_t sector) |
1da177e4 LT |
1212 | { |
1213 | ctx->bio_in = bio_in; | |
1214 | ctx->bio_out = bio_out; | |
003b5c57 KO |
1215 | if (bio_in) |
1216 | ctx->iter_in = bio_in->bi_iter; | |
1217 | if (bio_out) | |
1218 | ctx->iter_out = bio_out->bi_iter; | |
c66029f4 | 1219 | ctx->cc_sector = sector + cc->iv_offset; |
43d69034 | 1220 | init_completion(&ctx->restart); |
1da177e4 LT |
1221 | } |
1222 | ||
b2174eeb | 1223 | static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc, |
ef43aa38 | 1224 | void *req) |
b2174eeb HY |
1225 | { |
1226 | return (struct dm_crypt_request *)((char *)req + cc->dmreq_start); | |
1227 | } | |
1228 | ||
ef43aa38 | 1229 | static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq) |
b2174eeb | 1230 | { |
ef43aa38 | 1231 | return (void *)((char *)dmreq - cc->dmreq_start); |
b2174eeb HY |
1232 | } |
1233 | ||
2dc5327d MB |
1234 | static u8 *iv_of_dmreq(struct crypt_config *cc, |
1235 | struct dm_crypt_request *dmreq) | |
1236 | { | |
33d2f09f | 1237 | if (crypt_integrity_aead(cc)) |
ef43aa38 MB |
1238 | return (u8 *)ALIGN((unsigned long)(dmreq + 1), |
1239 | crypto_aead_alignmask(any_tfm_aead(cc)) + 1); | |
1240 | else | |
1241 | return (u8 *)ALIGN((unsigned long)(dmreq + 1), | |
1242 | crypto_skcipher_alignmask(any_tfm(cc)) + 1); | |
2dc5327d MB |
1243 | } |
1244 | ||
ef43aa38 MB |
1245 | static u8 *org_iv_of_dmreq(struct crypt_config *cc, |
1246 | struct dm_crypt_request *dmreq) | |
1247 | { | |
1248 | return iv_of_dmreq(cc, dmreq) + cc->iv_size; | |
1249 | } | |
1250 | ||
c13b5487 | 1251 | static __le64 *org_sector_of_dmreq(struct crypt_config *cc, |
ef43aa38 MB |
1252 | struct dm_crypt_request *dmreq) |
1253 | { | |
1254 | u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size; | |
c13b5487 | 1255 | return (__le64 *) ptr; |
ef43aa38 MB |
1256 | } |
1257 | ||
1258 | static unsigned int *org_tag_of_dmreq(struct crypt_config *cc, | |
1259 | struct dm_crypt_request *dmreq) | |
1260 | { | |
1261 | u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + | |
1262 | cc->iv_size + sizeof(uint64_t); | |
1263 | return (unsigned int*)ptr; | |
1264 | } | |
1265 | ||
1266 | static void *tag_from_dmreq(struct crypt_config *cc, | |
1267 | struct dm_crypt_request *dmreq) | |
1268 | { | |
1269 | struct convert_context *ctx = dmreq->ctx; | |
1270 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); | |
1271 | ||
1272 | return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) * | |
1273 | cc->on_disk_tag_size]; | |
1274 | } | |
1275 | ||
1276 | static void *iv_tag_from_dmreq(struct crypt_config *cc, | |
1277 | struct dm_crypt_request *dmreq) | |
1278 | { | |
1279 | return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size; | |
1280 | } | |
1281 | ||
1282 | static int crypt_convert_block_aead(struct crypt_config *cc, | |
1283 | struct convert_context *ctx, | |
1284 | struct aead_request *req, | |
1285 | unsigned int tag_offset) | |
01482b76 | 1286 | { |
003b5c57 KO |
1287 | struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); |
1288 | struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); | |
3a7f6c99 | 1289 | struct dm_crypt_request *dmreq; |
ef43aa38 | 1290 | u8 *iv, *org_iv, *tag_iv, *tag; |
c13b5487 | 1291 | __le64 *sector; |
ef43aa38 MB |
1292 | int r = 0; |
1293 | ||
1294 | BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size); | |
3a7f6c99 | 1295 | |
8f0009a2 | 1296 | /* Reject unexpected unaligned bio. */ |
0440d5c0 | 1297 | if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) |
8f0009a2 | 1298 | return -EIO; |
3a7f6c99 | 1299 | |
b2174eeb | 1300 | dmreq = dmreq_of_req(cc, req); |
ef43aa38 | 1301 | dmreq->iv_sector = ctx->cc_sector; |
8f0009a2 | 1302 | if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) |
ff3af92b | 1303 | dmreq->iv_sector >>= cc->sector_shift; |
ef43aa38 MB |
1304 | dmreq->ctx = ctx; |
1305 | ||
1306 | *org_tag_of_dmreq(cc, dmreq) = tag_offset; | |
1307 | ||
1308 | sector = org_sector_of_dmreq(cc, dmreq); | |
1309 | *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset); | |
1310 | ||
2dc5327d | 1311 | iv = iv_of_dmreq(cc, dmreq); |
ef43aa38 MB |
1312 | org_iv = org_iv_of_dmreq(cc, dmreq); |
1313 | tag = tag_from_dmreq(cc, dmreq); | |
1314 | tag_iv = iv_tag_from_dmreq(cc, dmreq); | |
1315 | ||
1316 | /* AEAD request: | |
1317 | * |----- AAD -------|------ DATA -------|-- AUTH TAG --| | |
1318 | * | (authenticated) | (auth+encryption) | | | |
1319 | * | sector_LE | IV | sector in/out | tag in/out | | |
1320 | */ | |
1321 | sg_init_table(dmreq->sg_in, 4); | |
1322 | sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t)); | |
1323 | sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size); | |
8f0009a2 | 1324 | sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset); |
ef43aa38 MB |
1325 | sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size); |
1326 | ||
1327 | sg_init_table(dmreq->sg_out, 4); | |
1328 | sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t)); | |
1329 | sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size); | |
8f0009a2 | 1330 | sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset); |
ef43aa38 MB |
1331 | sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size); |
1332 | ||
1333 | if (cc->iv_gen_ops) { | |
1334 | /* For READs use IV stored in integrity metadata */ | |
1335 | if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) { | |
1336 | memcpy(org_iv, tag_iv, cc->iv_size); | |
1337 | } else { | |
1338 | r = cc->iv_gen_ops->generator(cc, org_iv, dmreq); | |
1339 | if (r < 0) | |
1340 | return r; | |
1341 | /* Store generated IV in integrity metadata */ | |
1342 | if (cc->integrity_iv_size) | |
1343 | memcpy(tag_iv, org_iv, cc->iv_size); | |
1344 | } | |
1345 | /* Working copy of IV, to be modified in crypto API */ | |
1346 | memcpy(iv, org_iv, cc->iv_size); | |
1347 | } | |
1348 | ||
1349 | aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size); | |
1350 | if (bio_data_dir(ctx->bio_in) == WRITE) { | |
1351 | aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, | |
8f0009a2 | 1352 | cc->sector_size, iv); |
ef43aa38 MB |
1353 | r = crypto_aead_encrypt(req); |
1354 | if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size) | |
1355 | memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0, | |
1356 | cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size)); | |
1357 | } else { | |
1358 | aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, | |
8f0009a2 | 1359 | cc->sector_size + cc->integrity_tag_size, iv); |
ef43aa38 MB |
1360 | r = crypto_aead_decrypt(req); |
1361 | } | |
1362 | ||
f710126c MB |
1363 | if (r == -EBADMSG) { |
1364 | char b[BDEVNAME_SIZE]; | |
1365 | DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b), | |
ef43aa38 | 1366 | (unsigned long long)le64_to_cpu(*sector)); |
f710126c | 1367 | } |
ef43aa38 MB |
1368 | |
1369 | if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) | |
1370 | r = cc->iv_gen_ops->post(cc, org_iv, dmreq); | |
1371 | ||
8f0009a2 MB |
1372 | bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size); |
1373 | bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size); | |
01482b76 | 1374 | |
ef43aa38 MB |
1375 | return r; |
1376 | } | |
1377 | ||
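The AEAD layout above authenticates the little-endian sector number and the IV as associated data while the sector payload is encrypted and the tag stored alongside. A rough userspace analogue of that AAD = sector_le || IV idea, using OpenSSL's EVP interface with AES-256-GCM as a stand-in (dm-crypt itself typically builds authenc() constructions through the kernel crypto API; the key, IV width and tag size here are assumptions for illustration):

```c
/* Build with: cc aead_sector.c -lcrypto */
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <openssl/evp.h>

#define SECTOR_SIZE 512
#define TAG_SIZE 16

/* Encrypt one sector: AAD = le64(sector) || iv, payload = sector data. */
static int encrypt_sector(const uint8_t key[32], const uint8_t iv[12],
			  uint64_t sector, const uint8_t *in, uint8_t *out,
			  uint8_t tag[TAG_SIZE])
{
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
	uint8_t aad[8 + 12];
	int len, ok = 0;

	if (!ctx)
		return 0;
	for (int i = 0; i < 8; i++)
		aad[i] = (uint8_t)(sector >> (8 * i));   /* le64 sector number */
	memcpy(aad + 8, iv, 12);                         /* the IV is authenticated too */

	if (EVP_EncryptInit_ex(ctx, EVP_aes_256_gcm(), NULL, key, iv) &&
	    EVP_EncryptUpdate(ctx, NULL, &len, aad, sizeof(aad)) &&   /* AAD pass */
	    EVP_EncryptUpdate(ctx, out, &len, in, SECTOR_SIZE) &&
	    EVP_EncryptFinal_ex(ctx, out + len, &len) &&
	    EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, TAG_SIZE, tag))
		ok = 1;

	EVP_CIPHER_CTX_free(ctx);
	return ok;
}

int main(void)
{
	uint8_t key[32] = { 0 }, iv[12] = { 0 };
	uint8_t in[SECTOR_SIZE] = { 0 }, out[SECTOR_SIZE], tag[TAG_SIZE];

	if (!encrypt_sector(key, iv, 42, in, out, tag))
		return 1;
	printf("tag: ");
	for (int i = 0; i < TAG_SIZE; i++)
		printf("%02x", tag[i]);
	printf("\n");
	return 0;
}
```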
1378 | static int crypt_convert_block_skcipher(struct crypt_config *cc, | |
1379 | struct convert_context *ctx, | |
1380 | struct skcipher_request *req, | |
1381 | unsigned int tag_offset) | |
1382 | { | |
1383 | struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); | |
1384 | struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); | |
1385 | struct scatterlist *sg_in, *sg_out; | |
1386 | struct dm_crypt_request *dmreq; | |
ef43aa38 | 1387 | u8 *iv, *org_iv, *tag_iv; |
c13b5487 | 1388 | __le64 *sector; |
ef43aa38 | 1389 | int r = 0; |
01482b76 | 1390 | |
8f0009a2 | 1391 | /* Reject unexpected unaligned bio. */ |
0440d5c0 | 1392 | if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) |
8f0009a2 MB |
1393 | return -EIO; |
1394 | ||
ef43aa38 | 1395 | dmreq = dmreq_of_req(cc, req); |
c66029f4 | 1396 | dmreq->iv_sector = ctx->cc_sector; |
8f0009a2 | 1397 | if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) |
ff3af92b | 1398 | dmreq->iv_sector >>= cc->sector_shift; |
b2174eeb | 1399 | dmreq->ctx = ctx; |
01482b76 | 1400 | |
ef43aa38 MB |
1401 | *org_tag_of_dmreq(cc, dmreq) = tag_offset; |
1402 | ||
1403 | iv = iv_of_dmreq(cc, dmreq); | |
1404 | org_iv = org_iv_of_dmreq(cc, dmreq); | |
1405 | tag_iv = iv_tag_from_dmreq(cc, dmreq); | |
1406 | ||
1407 | sector = org_sector_of_dmreq(cc, dmreq); | |
1408 | *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset); | |
1409 | ||
1410 | /* For skcipher we use only the first sg item */ | |
1411 | sg_in = &dmreq->sg_in[0]; | |
1412 | sg_out = &dmreq->sg_out[0]; | |
01482b76 | 1413 | |
ef43aa38 | 1414 | sg_init_table(sg_in, 1); |
8f0009a2 | 1415 | sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset); |
ef43aa38 MB |
1416 | |
1417 | sg_init_table(sg_out, 1); | |
8f0009a2 | 1418 | sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset); |
01482b76 | 1419 | |
3a7f6c99 | 1420 | if (cc->iv_gen_ops) { |
ef43aa38 MB |
1421 | /* For READs use IV stored in integrity metadata */ |
1422 | if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) { | |
1423 | memcpy(org_iv, tag_iv, cc->integrity_iv_size); | |
1424 | } else { | |
1425 | r = cc->iv_gen_ops->generator(cc, org_iv, dmreq); | |
1426 | if (r < 0) | |
1427 | return r; | |
bbb16584 MB |
1428 | /* Data may already be preprocessed in the generator */ |
1429 | if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags)) | |
1430 | sg_in = sg_out; | |
ef43aa38 MB |
1431 | /* Store generated IV in integrity metadata */ |
1432 | if (cc->integrity_iv_size) | |
1433 | memcpy(tag_iv, org_iv, cc->integrity_iv_size); | |
1434 | } | |
1435 | /* Working copy of IV, to be modified in crypto API */ | |
1436 | memcpy(iv, org_iv, cc->iv_size); | |
3a7f6c99 MB |
1437 | } |
1438 | ||
8f0009a2 | 1439 | skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv); |
3a7f6c99 MB |
1440 | |
1441 | if (bio_data_dir(ctx->bio_in) == WRITE) | |
bbdb23b5 | 1442 | r = crypto_skcipher_encrypt(req); |
3a7f6c99 | 1443 | else |
bbdb23b5 | 1444 | r = crypto_skcipher_decrypt(req); |
3a7f6c99 | 1445 | |
2dc5327d | 1446 | if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) |
ef43aa38 MB |
1447 | r = cc->iv_gen_ops->post(cc, org_iv, dmreq); |
1448 | ||
8f0009a2 MB |
1449 | bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size); |
1450 | bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size); | |
2dc5327d | 1451 | |
3a7f6c99 | 1452 | return r; |
01482b76 MB |
1453 | } |
1454 | ||
95497a96 MB |
1455 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
1456 | int error); | |
c0297721 | 1457 | |
d68b2958 | 1458 | static int crypt_alloc_req_skcipher(struct crypt_config *cc, |
ef43aa38 | 1459 | struct convert_context *ctx) |
ddd42edf | 1460 | { |
c66029f4 | 1461 | unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); |
c0297721 | 1462 | |
d68b2958 IK |
1463 | if (!ctx->r.req) { |
1464 | ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO); | |
1465 | if (!ctx->r.req) | |
1466 | return -ENOMEM; | |
1467 | } | |
c0297721 | 1468 | |
ef43aa38 | 1469 | skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]); |
54cea3f6 MB |
1470 | |
1471 | /* | |
1472 | * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs | |
1473 | * requests if the driver request queue is full. |
1474 | */ | |
ef43aa38 | 1475 | skcipher_request_set_callback(ctx->r.req, |
432061b3 | 1476 | CRYPTO_TFM_REQ_MAY_BACKLOG, |
ef43aa38 | 1477 | kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); |
d68b2958 IK |
1478 | |
1479 | return 0; | |
ddd42edf MB |
1480 | } |
1481 | ||
d68b2958 | 1482 | static int crypt_alloc_req_aead(struct crypt_config *cc, |
ef43aa38 MB |
1483 | struct convert_context *ctx) |
1484 | { | |
004b8ae9 IK |
1485 | if (!ctx->r.req_aead) { |
1486 | ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO); | |
1487 | if (!ctx->r.req_aead) | |
d68b2958 IK |
1488 | return -ENOMEM; |
1489 | } | |
c0297721 | 1490 | |
ef43aa38 | 1491 | aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]); |
54cea3f6 MB |
1492 | |
1493 | /* | |
1494 | * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs | |
1495 | * requests if the driver's request queue is full.
1496 | */ | |
ef43aa38 | 1497 | aead_request_set_callback(ctx->r.req_aead, |
432061b3 | 1498 | CRYPTO_TFM_REQ_MAY_BACKLOG, |
ef43aa38 | 1499 | kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); |
d68b2958 IK |
1500 | |
1501 | return 0; | |
ef43aa38 MB |
1502 | } |
1503 | ||
d68b2958 | 1504 | static int crypt_alloc_req(struct crypt_config *cc, |
ef43aa38 MB |
1505 | struct convert_context *ctx) |
1506 | { | |
33d2f09f | 1507 | if (crypt_integrity_aead(cc)) |
d68b2958 | 1508 | return crypt_alloc_req_aead(cc, ctx); |
ef43aa38 | 1509 | else |
d68b2958 | 1510 | return crypt_alloc_req_skcipher(cc, ctx); |
ddd42edf MB |
1511 | } |
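/*
 * Illustrative sketch, not part of the driver: the callback wiring above
 * follows the generic async crypto completion pattern. A minimal handler
 * (hypothetical names) would look like:
 *
 *	static void my_done(struct crypto_async_request *areq, int error)
 *	{
 *		struct completion *done = areq->data;
 *
 *		if (error == -EINPROGRESS)
 *			return;		// backlogged request now really started
 *		complete(done);		// request actually finished
 *	}
 *
 * kcryptd_async_done() below plays this role for dm-crypt, except that on
 * -EINPROGRESS it completes ctx->restart so crypt_convert() can resume.
 */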
1512 | ||
ef43aa38 MB |
1513 | static void crypt_free_req_skcipher(struct crypt_config *cc, |
1514 | struct skcipher_request *req, struct bio *base_bio) | |
298a9fa0 MP |
1515 | { |
1516 | struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); | |
1517 | ||
bbdb23b5 | 1518 | if ((struct skcipher_request *)(io + 1) != req) |
6f1c819c | 1519 | mempool_free(req, &cc->req_pool); |
298a9fa0 MP |
1520 | } |
1521 | ||
ef43aa38 MB |
1522 | static void crypt_free_req_aead(struct crypt_config *cc, |
1523 | struct aead_request *req, struct bio *base_bio) | |
1524 | { | |
1525 | struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); | |
1526 | ||
1527 | if ((struct aead_request *)(io + 1) != req) | |
6f1c819c | 1528 | mempool_free(req, &cc->req_pool); |
ef43aa38 MB |
1529 | } |
1530 | ||
1531 | static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio) | |
1532 | { | |
33d2f09f | 1533 | if (crypt_integrity_aead(cc)) |
ef43aa38 MB |
1534 | crypt_free_req_aead(cc, req, base_bio); |
1535 | else | |
1536 | crypt_free_req_skcipher(cc, req, base_bio); | |
1537 | } | |
1538 | ||
1da177e4 LT |
1539 | /* |
1540 | * Encrypt / decrypt data from one bio to another (which can be the same one)
1541 | */ | |
4e4cbee9 | 1542 | static blk_status_t crypt_convert(struct crypt_config *cc, |
8abec36d | 1543 | struct convert_context *ctx, bool atomic, bool reset_pending) |
1da177e4 | 1544 | { |
ef43aa38 | 1545 | unsigned int tag_offset = 0; |
ff3af92b | 1546 | unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT; |
3f1e9070 | 1547 | int r; |
1da177e4 | 1548 | |
8abec36d IK |
1549 | /* |
1550 | * If reset_pending is set, we are dealing with the bio for the first time;
1551 | * otherwise we are continuing work on the previous bio, so don't touch
1552 | * the cc_pending counter.
1553 | */ | |
1554 | if (reset_pending) | |
1555 | atomic_set(&ctx->cc_pending, 1); | |
c8081618 | 1556 | |
003b5c57 | 1557 | while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { |
1da177e4 | 1558 | |
d68b2958 IK |
1559 | r = crypt_alloc_req(cc, ctx); |
1560 | if (r) { | |
1561 | complete(&ctx->restart); | |
1562 | return BLK_STS_DEV_RESOURCE; | |
1563 | } | |
1564 | ||
40b6229b | 1565 | atomic_inc(&ctx->cc_pending); |
3f1e9070 | 1566 | |
33d2f09f | 1567 | if (crypt_integrity_aead(cc)) |
ef43aa38 MB |
1568 | r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset); |
1569 | else | |
1570 | r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset); | |
3a7f6c99 MB |
1571 | |
1572 | switch (r) { | |
54cea3f6 MB |
1573 | /* |
1574 | * The request was queued by a crypto driver | |
1575 | * but the driver request queue is full, so let's wait.
1576 | */ | |
3a7f6c99 | 1577 | case -EBUSY: |
8abec36d IK |
1578 | if (in_interrupt()) { |
1579 | if (try_wait_for_completion(&ctx->restart)) { | |
1580 | /* | |
1581 | * we don't have to block to wait for completion, | |
1582 | * so proceed | |
1583 | */ | |
1584 | } else { | |
1585 | /* | |
1586 | * we can't wait for completion without blocking;
1587 | * exit and continue processing in a workqueue
1588 | */ | |
1589 | ctx->r.req = NULL; | |
1590 | ctx->cc_sector += sector_step; | |
1591 | tag_offset++; | |
1592 | return BLK_STS_DEV_RESOURCE; | |
1593 | } | |
1594 | } else { | |
1595 | wait_for_completion(&ctx->restart); | |
1596 | } | |
16735d02 | 1597 | reinit_completion(&ctx->restart); |
df561f66 | 1598 | fallthrough; |
54cea3f6 MB |
1599 | /* |
1600 | * The request is queued and processed asynchronously, | |
1601 | * completion function kcryptd_async_done() will be called. | |
1602 | */ | |
c0403ec0 | 1603 | case -EINPROGRESS: |
ef43aa38 | 1604 | ctx->r.req = NULL; |
8f0009a2 | 1605 | ctx->cc_sector += sector_step; |
583fe747 | 1606 | tag_offset++; |
3f1e9070 | 1607 | continue; |
54cea3f6 MB |
1608 | /* |
1609 | * The request was already processed (synchronously). | |
1610 | */ | |
3a7f6c99 | 1611 | case 0: |
40b6229b | 1612 | atomic_dec(&ctx->cc_pending); |
8f0009a2 | 1613 | ctx->cc_sector += sector_step; |
583fe747 | 1614 | tag_offset++; |
39d42fa9 IK |
1615 | if (!atomic) |
1616 | cond_resched(); | |
3a7f6c99 | 1617 | continue; |
ef43aa38 MB |
1618 | /* |
1619 | * There was a data integrity error. | |
1620 | */ | |
1621 | case -EBADMSG: | |
1622 | atomic_dec(&ctx->cc_pending); | |
4e4cbee9 | 1623 | return BLK_STS_PROTECTION; |
ef43aa38 MB |
1624 | /* |
1625 | * There was an error while processing the request. | |
1626 | */ | |
3f1e9070 | 1627 | default: |
40b6229b | 1628 | atomic_dec(&ctx->cc_pending); |
4e4cbee9 | 1629 | return BLK_STS_IOERR; |
3f1e9070 | 1630 | } |
1da177e4 LT |
1631 | } |
1632 | ||
3f1e9070 | 1633 | return 0; |
1da177e4 LT |
1634 | } |
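/*
 * Illustrative comparison, not part of the driver: a caller that can
 * always sleep could replace the -EBUSY/-EINPROGRESS handling above with
 * the kernel's canned helper from <linux/crypto.h>:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	skcipher_request_set_callback(req,
 *			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 *			crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * crypt_convert() open-codes the equivalent so it can also run without
 * sleeping (the no-workqueue/tasklet paths) and keep many sectors in
 * flight at once instead of waiting for each one in turn.
 */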
1635 | ||
cf2f1abf MP |
1636 | static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); |
1637 | ||
1da177e4 LT |
1638 | /* |
1639 | * Generate a new unfragmented bio with the given size.
586b286b MS |
1640 | * This should never violate the device limitations (but only because |
1641 | * max_segment_size is being constrained to PAGE_SIZE). | |
7145c241 MP |
1642 | * |
1643 | * This function may be called concurrently. If we allocate from the mempool | |
1644 | * concurrently, there is a possibility of deadlock. For example, if we have a
1645 | * mempool of 256 pages, and two processes, each wanting 256 pages, allocate
1646 | * from the mempool concurrently, it may deadlock in a situation where both processes
1647 | * have allocated 128 pages and the mempool is exhausted. | |
1648 | * | |
1649 | * In order to avoid this scenario we allocate the pages under a mutex. | |
1650 | * | |
1651 | * In order to not degrade performance with excessive locking, we try | |
1652 | * non-blocking allocations without a mutex first, but on failure we fall back
1653 | * to blocking allocations with a mutex. | |
1da177e4 | 1654 | */ |
cf2f1abf | 1655 | static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) |
1da177e4 | 1656 | { |
49a8a920 | 1657 | struct crypt_config *cc = io->cc; |
8b004457 | 1658 | struct bio *clone; |
1da177e4 | 1659 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
7145c241 MP |
1660 | gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; |
1661 | unsigned i, len, remaining_size; | |
91e10625 | 1662 | struct page *page; |
1da177e4 | 1663 | |
7145c241 | 1664 | retry: |
d0164adc | 1665 | if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) |
7145c241 MP |
1666 | mutex_lock(&cc->bio_alloc_lock); |
1667 | ||
6f1c819c | 1668 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs); |
8b004457 | 1669 | if (!clone) |
ef43aa38 | 1670 | goto out; |
1da177e4 | 1671 | |
027581f3 | 1672 | clone_init(io, clone); |
6a24c718 | 1673 | |
7145c241 MP |
1674 | remaining_size = size; |
1675 | ||
f97380bc | 1676 | for (i = 0; i < nr_iovecs; i++) { |
6f1c819c | 1677 | page = mempool_alloc(&cc->page_pool, gfp_mask); |
7145c241 MP |
1678 | if (!page) { |
1679 | crypt_free_buffer_pages(cc, clone); | |
1680 | bio_put(clone); | |
d0164adc | 1681 | gfp_mask |= __GFP_DIRECT_RECLAIM; |
7145c241 MP |
1682 | goto retry; |
1683 | } | |
1da177e4 | 1684 | |
7145c241 | 1685 | len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size; |
91e10625 | 1686 | |
0dae7fe5 | 1687 | bio_add_page(clone, page, len, 0); |
1da177e4 | 1688 | |
7145c241 | 1689 | remaining_size -= len; |
1da177e4 LT |
1690 | } |
1691 | ||
ef43aa38 MB |
1692 | /* Allocate space for integrity tags */ |
1693 | if (dm_crypt_integrity_io_alloc(io, clone)) { | |
1694 | crypt_free_buffer_pages(cc, clone); | |
1695 | bio_put(clone); | |
1696 | clone = NULL; | |
1697 | } | |
1698 | out: | |
d0164adc | 1699 | if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) |
7145c241 MP |
1700 | mutex_unlock(&cc->bio_alloc_lock); |
1701 | ||
8b004457 | 1702 | return clone; |
1da177e4 LT |
1703 | } |
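/*
 * The allocation strategy above, reduced to an illustrative sketch
 * (alloc_all() and free_partial() are hypothetical helpers):
 *
 *	gfp = GFP_NOWAIT;			// optimistic, lock-free
 * retry:
 *	if (gfp & __GFP_DIRECT_RECLAIM)
 *		mutex_lock(&lock);		// serialize blocking callers
 *	if (!alloc_all(gfp)) {
 *		free_partial();			// give pages back first
 *		gfp |= __GFP_DIRECT_RECLAIM;	// escalate to blocking mode
 *		goto retry;
 *	}
 *	...
 *	if (gfp & __GFP_DIRECT_RECLAIM)
 *		mutex_unlock(&lock);
 *
 * At most one blocking caller can drain the mempool at a time, so two
 * callers can no longer each hold half of it and wait forever.
 */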
1704 | ||
644bd2f0 | 1705 | static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) |
1da177e4 | 1706 | { |
1da177e4 | 1707 | struct bio_vec *bv; |
6dc4f100 | 1708 | struct bvec_iter_all iter_all; |
1da177e4 | 1709 | |
2b070cfe | 1710 | bio_for_each_segment_all(bv, clone, iter_all) { |
1da177e4 | 1711 | BUG_ON(!bv->bv_page); |
6f1c819c | 1712 | mempool_free(bv->bv_page, &cc->page_pool); |
1da177e4 LT |
1713 | } |
1714 | } | |
1715 | ||
298a9fa0 MP |
1716 | static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, |
1717 | struct bio *bio, sector_t sector) | |
dc440d1e | 1718 | { |
49a8a920 | 1719 | io->cc = cc; |
dc440d1e MB |
1720 | io->base_bio = bio; |
1721 | io->sector = sector; | |
1722 | io->error = 0; | |
ef43aa38 MB |
1723 | io->ctx.r.req = NULL; |
1724 | io->integrity_metadata = NULL; | |
1725 | io->integrity_metadata_from_pool = false; | |
40b6229b | 1726 | atomic_set(&io->io_pending, 0); |
dc440d1e MB |
1727 | } |
1728 | ||
3e1a8bdd MB |
1729 | static void crypt_inc_pending(struct dm_crypt_io *io) |
1730 | { | |
40b6229b | 1731 | atomic_inc(&io->io_pending); |
3e1a8bdd MB |
1732 | } |
1733 | ||
8e14f610 IK |
1734 | static void kcryptd_io_bio_endio(struct work_struct *work) |
1735 | { | |
1736 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | |
1737 | bio_endio(io->base_bio); | |
1738 | } | |
1739 | ||
1da177e4 LT |
1740 | /* |
1741 | * One of the bios was finished. Check for completion of | |
1742 | * the whole request and correctly clean up the buffer. | |
1743 | */ | |
5742fd77 | 1744 | static void crypt_dec_pending(struct dm_crypt_io *io) |
1da177e4 | 1745 | { |
49a8a920 | 1746 | struct crypt_config *cc = io->cc; |
b35f8caa | 1747 | struct bio *base_bio = io->base_bio; |
4e4cbee9 | 1748 | blk_status_t error = io->error; |
1da177e4 | 1749 | |
40b6229b | 1750 | if (!atomic_dec_and_test(&io->io_pending)) |
1da177e4 LT |
1751 | return; |
1752 | ||
ef43aa38 MB |
1753 | if (io->ctx.r.req) |
1754 | crypt_free_req(cc, io->ctx.r.req, base_bio); | |
1755 | ||
1756 | if (unlikely(io->integrity_metadata_from_pool)) | |
6f1c819c | 1757 | mempool_free(io->integrity_metadata, &io->cc->tag_pool); |
ef43aa38 MB |
1758 | else |
1759 | kfree(io->integrity_metadata); | |
b35f8caa | 1760 | |
4e4cbee9 | 1761 | base_bio->bi_status = error; |
8e14f610 IK |
1762 | |
1763 | /* | |
1764 | * If we are running this function from our tasklet, | |
1765 | * we can't call bio_endio() here, because it will call | |
1766 | * clone_endio() from dm.c, which in turn will | |
1767 | * free the current struct dm_crypt_io structure with | |
1768 | * our tasklet. In this case we need to delay bio_endio() | |
1769 | * execution to after the tasklet is done and dequeued. | |
1770 | */ | |
1771 | if (tasklet_trylock(&io->tasklet)) { | |
1772 | tasklet_unlock(&io->tasklet); | |
1773 | bio_endio(base_bio); | |
1774 | return; | |
1775 | } | |
1776 | ||
1777 | INIT_WORK(&io->work, kcryptd_io_bio_endio); | |
1778 | queue_work(cc->io_queue, &io->work); | |
1da177e4 LT |
1779 | } |
1780 | ||
1781 | /* | |
cabf08e4 | 1782 | * kcryptd/kcryptd_io: |
1da177e4 LT |
1783 | * |
1784 | * Needed because it would be very unwise to do decryption in an | |
23541d2d | 1785 | * interrupt context. |
cabf08e4 MB |
1786 | * |
1787 | * kcryptd performs the actual encryption or decryption. | |
1788 | * | |
1789 | * kcryptd_io performs the IO submission. | |
1790 | * | |
1791 | * They must be separated, as otherwise the final stages could be
1792 | * starved by new requests which can block in the first stages due | |
1793 | * to memory allocation. | |
c0297721 AK |
1794 | * |
1795 | * The work is done per CPU, globally for all dm-crypt instances.
1796 | * The work items should not depend on each other and must not block.
1da177e4 | 1797 | */ |
4246a0b6 | 1798 | static void crypt_endio(struct bio *clone) |
8b004457 | 1799 | { |
028867ac | 1800 | struct dm_crypt_io *io = clone->bi_private; |
49a8a920 | 1801 | struct crypt_config *cc = io->cc; |
ee7a491e | 1802 | unsigned rw = bio_data_dir(clone); |
4e4cbee9 | 1803 | blk_status_t error; |
8b004457 MB |
1804 | |
1805 | /* | |
6712ecf8 | 1806 | * free the processed pages |
8b004457 | 1807 | */ |
ee7a491e | 1808 | if (rw == WRITE) |
644bd2f0 | 1809 | crypt_free_buffer_pages(cc, clone); |
8b004457 | 1810 | |
4e4cbee9 | 1811 | error = clone->bi_status; |
8b004457 | 1812 | bio_put(clone); |
8b004457 | 1813 | |
9b81c842 | 1814 | if (rw == READ && !error) { |
ee7a491e MB |
1815 | kcryptd_queue_crypt(io); |
1816 | return; | |
1817 | } | |
5742fd77 | 1818 | |
9b81c842 SL |
1819 | if (unlikely(error)) |
1820 | io->error = error; | |
5742fd77 MB |
1821 | |
1822 | crypt_dec_pending(io); | |
8b004457 MB |
1823 | } |
1824 | ||
028867ac | 1825 | static void clone_init(struct dm_crypt_io *io, struct bio *clone) |
8b004457 | 1826 | { |
49a8a920 | 1827 | struct crypt_config *cc = io->cc; |
8b004457 MB |
1828 | |
1829 | clone->bi_private = io; | |
1830 | clone->bi_end_io = crypt_endio; | |
74d46992 | 1831 | bio_set_dev(clone, cc->dev->bdev); |
ef295ecf | 1832 | clone->bi_opf = io->base_bio->bi_opf; |
8b004457 MB |
1833 | } |
1834 | ||
20c82538 | 1835 | static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) |
8b004457 | 1836 | { |
49a8a920 | 1837 | struct crypt_config *cc = io->cc; |
8b004457 | 1838 | struct bio *clone; |
93e605c2 | 1839 | |
8b004457 | 1840 | /* |
59779079 MS |
1841 | * We need the original biovec array in order to decrypt |
1842 | * the whole bio data *afterwards* -- thanks to immutable | |
1843 | * biovecs we don't need to worry about the block layer | |
1844 | * modifying the biovec array, so we can leverage bio_clone_fast().
8b004457 | 1845 | */ |
6f1c819c | 1846 | clone = bio_clone_fast(io->base_bio, gfp, &cc->bs); |
7eaceacc | 1847 | if (!clone) |
20c82538 | 1848 | return 1; |
8b004457 | 1849 | |
20c82538 MB |
1850 | crypt_inc_pending(io); |
1851 | ||
8b004457 | 1852 | clone_init(io, clone); |
4f024f37 | 1853 | clone->bi_iter.bi_sector = cc->start + io->sector; |
8b004457 | 1854 | |
ef43aa38 MB |
1855 | if (dm_crypt_integrity_io_alloc(io, clone)) { |
1856 | crypt_dec_pending(io); | |
1857 | bio_put(clone); | |
1858 | return 1; | |
1859 | } | |
1860 | ||
ed00aabd | 1861 | submit_bio_noacct(clone); |
20c82538 | 1862 | return 0; |
8b004457 MB |
1863 | } |
1864 | ||
dc267621 MP |
1865 | static void kcryptd_io_read_work(struct work_struct *work) |
1866 | { | |
1867 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | |
1868 | ||
1869 | crypt_inc_pending(io); | |
1870 | if (kcryptd_io_read(io, GFP_NOIO)) | |
4e4cbee9 | 1871 | io->error = BLK_STS_RESOURCE; |
dc267621 MP |
1872 | crypt_dec_pending(io); |
1873 | } | |
1874 | ||
1875 | static void kcryptd_queue_read(struct dm_crypt_io *io) | |
1876 | { | |
1877 | struct crypt_config *cc = io->cc; | |
1878 | ||
1879 | INIT_WORK(&io->work, kcryptd_io_read_work); | |
1880 | queue_work(cc->io_queue, &io->work); | |
1881 | } | |
1882 | ||
4e4eef64 MB |
1883 | static void kcryptd_io_write(struct dm_crypt_io *io) |
1884 | { | |
95497a96 | 1885 | struct bio *clone = io->ctx.bio_out; |
dc267621 | 1886 | |
ed00aabd | 1887 | submit_bio_noacct(clone); |
4e4eef64 MB |
1888 | } |
1889 | ||
b3c5fd30 MP |
1890 | #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) |
1891 | ||
dc267621 | 1892 | static int dmcrypt_write(void *data) |
395b167c | 1893 | { |
dc267621 | 1894 | struct crypt_config *cc = data; |
b3c5fd30 MP |
1895 | struct dm_crypt_io *io; |
1896 | ||
dc267621 | 1897 | while (1) { |
b3c5fd30 | 1898 | struct rb_root write_tree; |
dc267621 | 1899 | struct blk_plug plug; |
395b167c | 1900 | |
c7329eff | 1901 | spin_lock_irq(&cc->write_thread_lock); |
dc267621 | 1902 | continue_locked: |
395b167c | 1903 | |
b3c5fd30 | 1904 | if (!RB_EMPTY_ROOT(&cc->write_tree)) |
dc267621 MP |
1905 | goto pop_from_list; |
1906 | ||
f659b100 | 1907 | set_current_state(TASK_INTERRUPTIBLE); |
dc267621 | 1908 | |
c7329eff | 1909 | spin_unlock_irq(&cc->write_thread_lock); |
dc267621 | 1910 | |
f659b100 | 1911 | if (unlikely(kthread_should_stop())) { |
642fa448 | 1912 | set_current_state(TASK_RUNNING); |
f659b100 RV |
1913 | break; |
1914 | } | |
1915 | ||
dc267621 MP |
1916 | schedule(); |
1917 | ||
642fa448 | 1918 | set_current_state(TASK_RUNNING); |
c7329eff | 1919 | spin_lock_irq(&cc->write_thread_lock); |
dc267621 MP |
1920 | goto continue_locked; |
1921 | ||
1922 | pop_from_list: | |
b3c5fd30 MP |
1923 | write_tree = cc->write_tree; |
1924 | cc->write_tree = RB_ROOT; | |
c7329eff | 1925 | spin_unlock_irq(&cc->write_thread_lock); |
dc267621 | 1926 | |
b3c5fd30 MP |
1927 | BUG_ON(rb_parent(write_tree.rb_node)); |
1928 | ||
1929 | /* | |
1930 | * Note: we cannot walk the tree here with rb_next because | |
1931 | * the structures may be freed when kcryptd_io_write is called. | |
1932 | */ | |
dc267621 MP |
1933 | blk_start_plug(&plug); |
1934 | do { | |
b3c5fd30 MP |
1935 | io = crypt_io_from_node(rb_first(&write_tree)); |
1936 | rb_erase(&io->rb_node, &write_tree); | |
dc267621 | 1937 | kcryptd_io_write(io); |
b3c5fd30 | 1938 | } while (!RB_EMPTY_ROOT(&write_tree)); |
dc267621 MP |
1939 | blk_finish_plug(&plug); |
1940 | } | |
1941 | return 0; | |
395b167c AK |
1942 | } |
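/*
 * The loop above is the standard lost-wakeup-safe kthread idiom; an
 * illustrative skeleton (work_available() is hypothetical):
 *
 *	while (1) {
 *		spin_lock_irq(&lock);
 *		if (work_available())
 *			goto pop_from_list;
 *		set_current_state(TASK_INTERRUPTIBLE);	// before unlocking
 *		spin_unlock_irq(&lock);
 *		if (kthread_should_stop())
 *			break;
 *		schedule();
 *		...
 *	}
 *
 * Marking the task TASK_INTERRUPTIBLE while the producer's lock is still
 * held orders the state change against wake_up_process() in
 * kcryptd_crypt_write_io_submit(), so a wakeup arriving between the
 * unlock and schedule() simply makes schedule() return immediately.
 */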
1943 | ||
72c6e7af | 1944 | static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) |
4e4eef64 | 1945 | { |
dec1cedf | 1946 | struct bio *clone = io->ctx.bio_out; |
49a8a920 | 1947 | struct crypt_config *cc = io->cc; |
dc267621 | 1948 | unsigned long flags; |
b3c5fd30 MP |
1949 | sector_t sector; |
1950 | struct rb_node **rbp, *parent; | |
dec1cedf | 1951 | |
4e4cbee9 | 1952 | if (unlikely(io->error)) { |
dec1cedf MB |
1953 | crypt_free_buffer_pages(cc, clone); |
1954 | bio_put(clone); | |
6c031f41 | 1955 | crypt_dec_pending(io); |
dec1cedf MB |
1956 | return; |
1957 | } | |
1958 | ||
1959 | /* crypt_convert should have filled the clone bio */ | |
003b5c57 | 1960 | BUG_ON(io->ctx.iter_out.bi_size); |
dec1cedf | 1961 | |
4f024f37 | 1962 | clone->bi_iter.bi_sector = cc->start + io->sector; |
899c95d3 | 1963 | |
39d42fa9 IK |
1964 | if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) || |
1965 | test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) { | |
ed00aabd | 1966 | submit_bio_noacct(clone); |
0f5d8e6e MP |
1967 | return; |
1968 | } | |
1969 | ||
c7329eff MP |
1970 | spin_lock_irqsave(&cc->write_thread_lock, flags); |
1971 | if (RB_EMPTY_ROOT(&cc->write_tree)) | |
1972 | wake_up_process(cc->write_thread); | |
b3c5fd30 MP |
1973 | rbp = &cc->write_tree.rb_node; |
1974 | parent = NULL; | |
1975 | sector = io->sector; | |
1976 | while (*rbp) { | |
1977 | parent = *rbp; | |
1978 | if (sector < crypt_io_from_node(parent)->sector) | |
1979 | rbp = &(*rbp)->rb_left; | |
1980 | else | |
1981 | rbp = &(*rbp)->rb_right; | |
1982 | } | |
1983 | rb_link_node(&io->rb_node, parent, rbp); | |
1984 | rb_insert_color(&io->rb_node, &cc->write_tree); | |
c7329eff | 1985 | spin_unlock_irqrestore(&cc->write_thread_lock, flags); |
4e4eef64 MB |
1986 | } |
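/*
 * Worked example (illustrative): if writes for sectors 8, 0, 8 and 4 are
 * queued in that order, dmcrypt_write() drains the tree via rb_first()
 * as 0, 4, 8, 8. Equal keys deliberately descend to the right above, so
 * bios for the same sector are also issued in FIFO order.
 */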
1987 | ||
8e225f04 DLM |
1988 | static bool kcryptd_crypt_write_inline(struct crypt_config *cc, |
1989 | struct convert_context *ctx) | |
1990 | ||
1991 | { | |
1992 | if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags)) | |
1993 | return false; | |
1994 | ||
1995 | /* | |
1996 | * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering | |
1997 | * constraints so they do not need to be issued inline by | |
1998 | * kcryptd_crypt_write_convert(). | |
1999 | */ | |
2000 | switch (bio_op(ctx->bio_in)) { | |
2001 | case REQ_OP_WRITE: | |
2002 | case REQ_OP_WRITE_SAME: | |
2003 | case REQ_OP_WRITE_ZEROES: | |
2004 | return true; | |
2005 | default: | |
2006 | return false; | |
2007 | } | |
2008 | } | |
2009 | ||
8abec36d IK |
2010 | static void kcryptd_crypt_write_continue(struct work_struct *work) |
2011 | { | |
2012 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | |
2013 | struct crypt_config *cc = io->cc; | |
2014 | struct convert_context *ctx = &io->ctx; | |
2015 | int crypt_finished; | |
2016 | sector_t sector = io->sector; | |
2017 | blk_status_t r; | |
2018 | ||
2019 | wait_for_completion(&ctx->restart); | |
2020 | reinit_completion(&ctx->restart); | |
2021 | ||
2022 | r = crypt_convert(cc, &io->ctx, true, false); | |
2023 | if (r) | |
2024 | io->error = r; | |
2025 | crypt_finished = atomic_dec_and_test(&ctx->cc_pending); | |
2026 | if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) { | |
2027 | /* Wait for completion signaled by kcryptd_async_done() */ | |
2028 | wait_for_completion(&ctx->restart); | |
2029 | crypt_finished = 1; | |
2030 | } | |
2031 | ||
2032 | /* Encryption was already finished, submit io now */ | |
2033 | if (crypt_finished) { | |
2034 | kcryptd_crypt_write_io_submit(io, 0); | |
2035 | io->sector = sector; | |
2036 | } | |
2037 | ||
2038 | crypt_dec_pending(io); | |
2039 | } | |
2040 | ||
fc5a5e9a | 2041 | static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) |
8b004457 | 2042 | { |
49a8a920 | 2043 | struct crypt_config *cc = io->cc; |
8e225f04 | 2044 | struct convert_context *ctx = &io->ctx; |
8b004457 | 2045 | struct bio *clone; |
c8081618 | 2046 | int crypt_finished; |
b635b00e | 2047 | sector_t sector = io->sector; |
4e4cbee9 | 2048 | blk_status_t r; |
8b004457 | 2049 | |
fc5a5e9a MB |
2050 | /* |
2051 | * Prevent io from disappearing until this function completes. | |
2052 | */ | |
2053 | crypt_inc_pending(io); | |
8e225f04 | 2054 | crypt_convert_init(cc, ctx, NULL, io->base_bio, sector); |
fc5a5e9a | 2055 | |
cf2f1abf MP |
2056 | clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); |
2057 | if (unlikely(!clone)) { | |
4e4cbee9 | 2058 | io->error = BLK_STS_IOERR; |
cf2f1abf MP |
2059 | goto dec; |
2060 | } | |
c8081618 | 2061 | |
cf2f1abf MP |
2062 | io->ctx.bio_out = clone; |
2063 | io->ctx.iter_out = clone->bi_iter; | |
b635b00e | 2064 | |
cf2f1abf | 2065 | sector += bio_sectors(clone); |
93e605c2 | 2066 | |
cf2f1abf | 2067 | crypt_inc_pending(io); |
8e225f04 | 2068 | r = crypt_convert(cc, ctx, |
8abec36d IK |
2069 | test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true); |
2070 | /* | |
2071 | * The Crypto API backlogged the request because its queue was full
2072 | * and we're in softirq context, so continue from a workqueue | |
2073 | * (TODO: is it actually possible to be in softirq in the write path?) | |
2074 | */ | |
2075 | if (r == BLK_STS_DEV_RESOURCE) { | |
2076 | INIT_WORK(&io->work, kcryptd_crypt_write_continue); | |
2077 | queue_work(cc->crypt_queue, &io->work); | |
2078 | return; | |
2079 | } | |
4e4cbee9 | 2080 | if (r) |
ef43aa38 | 2081 | io->error = r; |
8e225f04 DLM |
2082 | crypt_finished = atomic_dec_and_test(&ctx->cc_pending); |
2083 | if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) { | |
2084 | /* Wait for completion signaled by kcryptd_async_done() */ | |
2085 | wait_for_completion(&ctx->restart); | |
2086 | crypt_finished = 1; | |
2087 | } | |
933f01d4 | 2088 | |
cf2f1abf MP |
2089 | /* Encryption was already finished, submit io now */ |
2090 | if (crypt_finished) { | |
2091 | kcryptd_crypt_write_io_submit(io, 0); | |
2092 | io->sector = sector; | |
93e605c2 | 2093 | } |
899c95d3 | 2094 | |
cf2f1abf | 2095 | dec: |
899c95d3 | 2096 | crypt_dec_pending(io); |
84131db6 MB |
2097 | } |
2098 | ||
72c6e7af | 2099 | static void kcryptd_crypt_read_done(struct dm_crypt_io *io) |
5742fd77 | 2100 | { |
5742fd77 MB |
2101 | crypt_dec_pending(io); |
2102 | } | |
2103 | ||
8abec36d IK |
2104 | static void kcryptd_crypt_read_continue(struct work_struct *work) |
2105 | { | |
2106 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | |
2107 | struct crypt_config *cc = io->cc; | |
2108 | blk_status_t r; | |
2109 | ||
2110 | wait_for_completion(&io->ctx.restart); | |
2111 | reinit_completion(&io->ctx.restart); | |
2112 | ||
2113 | r = crypt_convert(cc, &io->ctx, true, false); | |
2114 | if (r) | |
2115 | io->error = r; | |
2116 | ||
2117 | if (atomic_dec_and_test(&io->ctx.cc_pending)) | |
2118 | kcryptd_crypt_read_done(io); | |
2119 | ||
2120 | crypt_dec_pending(io); | |
2121 | } | |
2122 | ||
4e4eef64 | 2123 | static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) |
8b004457 | 2124 | { |
49a8a920 | 2125 | struct crypt_config *cc = io->cc; |
4e4cbee9 | 2126 | blk_status_t r; |
1da177e4 | 2127 | |
3e1a8bdd | 2128 | crypt_inc_pending(io); |
3a7f6c99 | 2129 | |
53017030 | 2130 | crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, |
0c395b0f | 2131 | io->sector); |
1da177e4 | 2132 | |
39d42fa9 | 2133 | r = crypt_convert(cc, &io->ctx, |
8abec36d IK |
2134 | test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); |
2135 | /* | |
2136 | * The Crypto API backlogged the request because its queue was full
2137 | * and we're in softirq context, so continue from a workqueue | |
2138 | */ | |
2139 | if (r == BLK_STS_DEV_RESOURCE) { | |
2140 | INIT_WORK(&io->work, kcryptd_crypt_read_continue); | |
2141 | queue_work(cc->crypt_queue, &io->work); | |
2142 | return; | |
2143 | } | |
4e4cbee9 | 2144 | if (r) |
ef43aa38 | 2145 | io->error = r; |
5742fd77 | 2146 | |
40b6229b | 2147 | if (atomic_dec_and_test(&io->ctx.cc_pending)) |
72c6e7af | 2148 | kcryptd_crypt_read_done(io); |
3a7f6c99 MB |
2149 | |
2150 | crypt_dec_pending(io); | |
1da177e4 LT |
2151 | } |
2152 | ||
95497a96 MB |
2153 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
2154 | int error) | |
2155 | { | |
b2174eeb HY |
2156 | struct dm_crypt_request *dmreq = async_req->data; |
2157 | struct convert_context *ctx = dmreq->ctx; | |
95497a96 | 2158 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); |
49a8a920 | 2159 | struct crypt_config *cc = io->cc; |
95497a96 | 2160 | |
54cea3f6 MB |
2161 | /* |
2162 | * A request from the crypto driver backlog is going to be processed now;
2163 | * finish the completion and continue in crypt_convert(). | |
2164 | * (Callback will be called for the second time for this request.) | |
2165 | */ | |
c0403ec0 RV |
2166 | if (error == -EINPROGRESS) { |
2167 | complete(&ctx->restart); | |
95497a96 | 2168 | return; |
c0403ec0 | 2169 | } |
95497a96 | 2170 | |
2dc5327d | 2171 | if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) |
ef43aa38 | 2172 | error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq); |
2dc5327d | 2173 | |
ef43aa38 | 2174 | if (error == -EBADMSG) { |
f710126c MB |
2175 | char b[BDEVNAME_SIZE]; |
2176 | DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b), | |
ef43aa38 | 2177 | (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq))); |
4e4cbee9 | 2178 | io->error = BLK_STS_PROTECTION; |
ef43aa38 | 2179 | } else if (error < 0) |
4e4cbee9 | 2180 | io->error = BLK_STS_IOERR; |
72c6e7af | 2181 | |
298a9fa0 | 2182 | crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); |
95497a96 | 2183 | |
40b6229b | 2184 | if (!atomic_dec_and_test(&ctx->cc_pending)) |
c0403ec0 | 2185 | return; |
95497a96 | 2186 | |
8e225f04 DLM |
2187 | /* |
2188 | * The request is fully completed: for inline writes, let | |
2189 | * kcryptd_crypt_write_convert() do the IO submission. | |
2190 | */ | |
2191 | if (bio_data_dir(io->base_bio) == READ) { | |
72c6e7af | 2192 | kcryptd_crypt_read_done(io); |
8e225f04 DLM |
2193 | return; |
2194 | } | |
2195 | ||
2196 | if (kcryptd_crypt_write_inline(cc, ctx)) { | |
2197 | complete(&ctx->restart); | |
2198 | return; | |
2199 | } | |
2200 | ||
2201 | kcryptd_crypt_write_io_submit(io, 1); | |
95497a96 MB |
2202 | } |
2203 | ||
395b167c | 2204 | static void kcryptd_crypt(struct work_struct *work) |
1da177e4 | 2205 | { |
028867ac | 2206 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
8b004457 | 2207 | |
cabf08e4 | 2208 | if (bio_data_dir(io->base_bio) == READ) |
395b167c | 2209 | kcryptd_crypt_read_convert(io); |
4e4eef64 | 2210 | else |
395b167c | 2211 | kcryptd_crypt_write_convert(io); |
cabf08e4 MB |
2212 | } |
2213 | ||
39d42fa9 IK |
2214 | static void kcryptd_crypt_tasklet(unsigned long work) |
2215 | { | |
2216 | kcryptd_crypt((struct work_struct *)work); | |
2217 | } | |
2218 | ||
395b167c | 2219 | static void kcryptd_queue_crypt(struct dm_crypt_io *io) |
cabf08e4 | 2220 | { |
49a8a920 | 2221 | struct crypt_config *cc = io->cc; |
cabf08e4 | 2222 | |
39d42fa9 IK |
2223 | if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) || |
2224 | (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) { | |
c87a95dc IK |
2225 | /* |
2226 | * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context. | |
2227 | * irqs_disabled(): the kernel may run some IO completion from the idle thread, but | |
2228 | * it is being executed with irqs disabled. | |
2229 | */ | |
2230 | if (in_irq() || irqs_disabled()) { | |
39d42fa9 IK |
2231 | tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work); |
2232 | tasklet_schedule(&io->tasklet); | |
2233 | return; | |
2234 | } | |
2235 | ||
2236 | kcryptd_crypt(&io->work); | |
2237 | return; | |
2238 | } | |
2239 | ||
395b167c AK |
2240 | INIT_WORK(&io->work, kcryptd_crypt); |
2241 | queue_work(cc->crypt_queue, &io->work); | |
1da177e4 LT |
2242 | } |
2243 | ||
ef43aa38 | 2244 | static void crypt_free_tfms_aead(struct crypt_config *cc) |
1da177e4 | 2245 | { |
ef43aa38 MB |
2246 | if (!cc->cipher_tfm.tfms_aead) |
2247 | return; | |
1da177e4 | 2248 | |
ef43aa38 MB |
2249 | if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) { |
2250 | crypto_free_aead(cc->cipher_tfm.tfms_aead[0]); | |
2251 | cc->cipher_tfm.tfms_aead[0] = NULL; | |
1da177e4 LT |
2252 | } |
2253 | ||
ef43aa38 MB |
2254 | kfree(cc->cipher_tfm.tfms_aead); |
2255 | cc->cipher_tfm.tfms_aead = NULL; | |
1da177e4 LT |
2256 | } |
2257 | ||
ef43aa38 | 2258 | static void crypt_free_tfms_skcipher(struct crypt_config *cc) |
d1f96423 | 2259 | { |
d1f96423 MB |
2260 | unsigned i; |
2261 | ||
ef43aa38 | 2262 | if (!cc->cipher_tfm.tfms) |
fd2d231f MP |
2263 | return; |
2264 | ||
d1f96423 | 2265 | for (i = 0; i < cc->tfms_count; i++) |
ef43aa38 MB |
2266 | if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) { |
2267 | crypto_free_skcipher(cc->cipher_tfm.tfms[i]); | |
2268 | cc->cipher_tfm.tfms[i] = NULL; | |
d1f96423 | 2269 | } |
fd2d231f | 2270 | |
ef43aa38 MB |
2271 | kfree(cc->cipher_tfm.tfms); |
2272 | cc->cipher_tfm.tfms = NULL; | |
d1f96423 MB |
2273 | } |
2274 | ||
ef43aa38 MB |
2275 | static void crypt_free_tfms(struct crypt_config *cc) |
2276 | { | |
33d2f09f | 2277 | if (crypt_integrity_aead(cc)) |
ef43aa38 MB |
2278 | crypt_free_tfms_aead(cc); |
2279 | else | |
2280 | crypt_free_tfms_skcipher(cc); | |
2281 | } | |
2282 | ||
2283 | static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode) | |
d1f96423 | 2284 | { |
d1f96423 MB |
2285 | unsigned i; |
2286 | int err; | |
2287 | ||
6396bb22 KC |
2288 | cc->cipher_tfm.tfms = kcalloc(cc->tfms_count, |
2289 | sizeof(struct crypto_skcipher *), | |
2290 | GFP_KERNEL); | |
ef43aa38 | 2291 | if (!cc->cipher_tfm.tfms) |
fd2d231f MP |
2292 | return -ENOMEM; |
2293 | ||
d1f96423 | 2294 | for (i = 0; i < cc->tfms_count; i++) { |
cd746938 MP |
2295 | cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, |
2296 | CRYPTO_ALG_ALLOCATES_MEMORY); | |
ef43aa38 MB |
2297 | if (IS_ERR(cc->cipher_tfm.tfms[i])) { |
2298 | err = PTR_ERR(cc->cipher_tfm.tfms[i]); | |
fd2d231f | 2299 | crypt_free_tfms(cc); |
d1f96423 MB |
2300 | return err; |
2301 | } | |
2302 | } | |
2303 | ||
af331eba EB |
2304 | /* |
2305 | * dm-crypt performance can vary greatly depending on which crypto | |
2306 | * algorithm implementation is used. Help people debug performance | |
2307 | * problems by logging the ->cra_driver_name. | |
2308 | */ | |
7a1cd723 | 2309 | DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode, |
af331eba | 2310 | crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name); |
d1f96423 MB |
2311 | return 0; |
2312 | } | |
2313 | ||
ef43aa38 MB |
2314 | static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode) |
2315 | { | |
ef43aa38 MB |
2316 | int err; |
2317 | ||
2318 | cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL); | |
2319 | if (!cc->cipher_tfm.tfms) | |
2320 | return -ENOMEM; | |
2321 | ||
cd746938 MP |
2322 | cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, |
2323 | CRYPTO_ALG_ALLOCATES_MEMORY); | |
ef43aa38 MB |
2324 | if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) { |
2325 | err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]); | |
2326 | crypt_free_tfms(cc); | |
2327 | return err; | |
2328 | } | |
2329 | ||
7a1cd723 | 2330 | DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode, |
af331eba | 2331 | crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name); |
ef43aa38 MB |
2332 | return 0; |
2333 | } | |
2334 | ||
2335 | static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) | |
2336 | { | |
33d2f09f | 2337 | if (crypt_integrity_aead(cc)) |
ef43aa38 MB |
2338 | return crypt_alloc_tfms_aead(cc, ciphermode); |
2339 | else | |
2340 | return crypt_alloc_tfms_skcipher(cc, ciphermode); | |
2341 | } | |
2342 | ||
2343 | static unsigned crypt_subkey_size(struct crypt_config *cc) | |
2344 | { | |
2345 | return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); | |
2346 | } | |
2347 | ||
2348 | static unsigned crypt_authenckey_size(struct crypt_config *cc) | |
2349 | { | |
2350 | return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param)); | |
2351 | } | |
2352 | ||
2353 | /* | |
2354 | * If AEAD is composed like authenc(hmac(sha256),xts(aes)), | |
2355 | * the key must be passed to the authenc template in a special format.
2356 | * This function converts cc->key to that special format.
2357 | */ | |
2358 | static void crypt_copy_authenckey(char *p, const void *key, | |
2359 | unsigned enckeylen, unsigned authkeylen) | |
2360 | { | |
2361 | struct crypto_authenc_key_param *param; | |
2362 | struct rtattr *rta; | |
2363 | ||
2364 | rta = (struct rtattr *)p; | |
2365 | param = RTA_DATA(rta); | |
2366 | param->enckeylen = cpu_to_be32(enckeylen); | |
2367 | rta->rta_len = RTA_LENGTH(sizeof(*param)); | |
2368 | rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; | |
2369 | p += RTA_SPACE(sizeof(*param)); | |
2370 | memcpy(p, key + enckeylen, authkeylen); | |
2371 | p += authkeylen; | |
2372 | memcpy(p, key, enckeylen); | |
2373 | } | |
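/*
 * Resulting buffer layout, as a worked example (illustrative) for
 * authenc(hmac(sha256),cbc(aes)) with enckeylen = 32 and authkeylen = 32:
 *
 *	[ struct rtattr           ]  rta_type = CRYPTO_AUTHENC_KEYA_PARAM
 *	[ authenc_key_param       ]  enckeylen = cpu_to_be32(32)
 *	[ 32-byte HMAC auth key   ]
 *	[ 32-byte AES encrypt key ]
 *
 * i.e. the same format that crypto_authenc_extractkeys() parses on the
 * crypto side.
 */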
2374 | ||
671ea6b4 | 2375 | static int crypt_setkey(struct crypt_config *cc) |
c0297721 | 2376 | { |
da31a078 | 2377 | unsigned subkey_size; |
fd2d231f MP |
2378 | int err = 0, i, r; |
2379 | ||
da31a078 | 2380 | /* Ignore extra keys (which are used for IV etc) */ |
ef43aa38 | 2381 | subkey_size = crypt_subkey_size(cc); |
da31a078 | 2382 | |
27c70036 MB |
2383 | if (crypt_integrity_hmac(cc)) { |
2384 | if (subkey_size < cc->key_mac_size) | |
2385 | return -EINVAL; | |
2386 | ||
ef43aa38 MB |
2387 | crypt_copy_authenckey(cc->authenc_key, cc->key, |
2388 | subkey_size - cc->key_mac_size, | |
2389 | cc->key_mac_size); | |
27c70036 MB |
2390 | } |
2391 | ||
fd2d231f | 2392 | for (i = 0; i < cc->tfms_count; i++) { |
33d2f09f | 2393 | if (crypt_integrity_hmac(cc)) |
ef43aa38 MB |
2394 | r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], |
2395 | cc->authenc_key, crypt_authenckey_size(cc)); | |
33d2f09f MB |
2396 | else if (crypt_integrity_aead(cc)) |
2397 | r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], | |
2398 | cc->key + (i * subkey_size), | |
2399 | subkey_size); | |
ef43aa38 MB |
2400 | else |
2401 | r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i], | |
2402 | cc->key + (i * subkey_size), | |
2403 | subkey_size); | |
fd2d231f MP |
2404 | if (r) |
2405 | err = r; | |
c0297721 AK |
2406 | } |
2407 | ||
ef43aa38 MB |
2408 | if (crypt_integrity_hmac(cc)) |
2409 | memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc)); | |
2410 | ||
c0297721 AK |
2411 | return err; |
2412 | } | |
2413 | ||
c538f6ec OK |
2414 | #ifdef CONFIG_KEYS |
2415 | ||
027c431c OK |
2416 | static bool contains_whitespace(const char *str) |
2417 | { | |
2418 | while (*str) | |
2419 | if (isspace(*str++)) | |
2420 | return true; | |
2421 | return false; | |
2422 | } | |
2423 | ||
27f5411a DB |
2424 | static int set_key_user(struct crypt_config *cc, struct key *key) |
2425 | { | |
2426 | const struct user_key_payload *ukp; | |
2427 | ||
2428 | ukp = user_key_payload_locked(key); | |
2429 | if (!ukp) | |
2430 | return -EKEYREVOKED; | |
2431 | ||
2432 | if (cc->key_size != ukp->datalen) | |
2433 | return -EINVAL; | |
2434 | ||
2435 | memcpy(cc->key, ukp->data, cc->key_size); | |
2436 | ||
2437 | return 0; | |
2438 | } | |
2439 | ||
27f5411a DB |
2440 | static int set_key_encrypted(struct crypt_config *cc, struct key *key) |
2441 | { | |
2442 | const struct encrypted_key_payload *ekp; | |
2443 | ||
2444 | ekp = key->payload.data[0]; | |
2445 | if (!ekp) | |
2446 | return -EKEYREVOKED; | |
2447 | ||
2448 | if (cc->key_size != ekp->decrypted_datalen) | |
2449 | return -EINVAL; | |
2450 | ||
2451 | memcpy(cc->key, ekp->decrypted_data, cc->key_size); | |
2452 | ||
2453 | return 0; | |
2454 | } | |
27f5411a | 2455 | |
363880c4 AF |
2456 | static int set_key_trusted(struct crypt_config *cc, struct key *key) |
2457 | { | |
2458 | const struct trusted_key_payload *tkp; | |
2459 | ||
2460 | tkp = key->payload.data[0]; | |
2461 | if (!tkp) | |
2462 | return -EKEYREVOKED; | |
2463 | ||
2464 | if (cc->key_size != tkp->key_len) | |
2465 | return -EINVAL; | |
2466 | ||
2467 | memcpy(cc->key, tkp->key, cc->key_size); | |
2468 | ||
2469 | return 0; | |
2470 | } | |
2471 | ||
c538f6ec OK |
2472 | static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string) |
2473 | { | |
2474 | char *new_key_string, *key_desc; | |
2475 | int ret; | |
27f5411a | 2476 | struct key_type *type; |
c538f6ec | 2477 | struct key *key; |
27f5411a | 2478 | int (*set_key)(struct crypt_config *cc, struct key *key); |
c538f6ec | 2479 | |
027c431c OK |
2480 | /* |
2481 | * Reject any key_string containing whitespace. dm core currently lacks code
2482 | * for proper whitespace escaping in arguments on the DM_TABLE_STATUS path.
2483 | */ | |
2484 | if (contains_whitespace(key_string)) { | |
2485 | DMERR("whitespace chars not allowed in key string"); | |
2486 | return -EINVAL; | |
2487 | } | |
2488 | ||
c538f6ec OK |
2489 | /* look for next ':' separating key_type from key_description */ |
2490 | key_desc = strpbrk(key_string, ":"); | |
2491 | if (!key_desc || key_desc == key_string || !strlen(key_desc + 1)) | |
2492 | return -EINVAL; | |
2493 | ||
27f5411a DB |
2494 | if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) { |
2495 | type = &key_type_logon; | |
2496 | set_key = set_key_user; | |
2497 | } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) { | |
2498 | type = &key_type_user; | |
2499 | set_key = set_key_user; | |
831475cc AF |
2500 | } else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) && |
2501 | !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) { | |
27f5411a DB |
2502 | type = &key_type_encrypted; |
2503 | set_key = set_key_encrypted; | |
363880c4 AF |
2504 | } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) && |
2505 | !strncmp(key_string, "trusted:", key_desc - key_string + 1)) { | |
2506 | type = &key_type_trusted; | |
2507 | set_key = set_key_trusted; | |
27f5411a | 2508 | } else { |
c538f6ec | 2509 | return -EINVAL; |
27f5411a | 2510 | } |
c538f6ec OK |
2511 | |
2512 | new_key_string = kstrdup(key_string, GFP_KERNEL); | |
2513 | if (!new_key_string) | |
2514 | return -ENOMEM; | |
2515 | ||
27f5411a | 2516 | key = request_key(type, key_desc + 1, NULL); |
c538f6ec | 2517 | if (IS_ERR(key)) { |
453431a5 | 2518 | kfree_sensitive(new_key_string); |
c538f6ec OK |
2519 | return PTR_ERR(key); |
2520 | } | |
2521 | ||
f5b0cba8 | 2522 | down_read(&key->sem); |
c538f6ec | 2523 | |
27f5411a DB |
2524 | ret = set_key(cc, key); |
2525 | if (ret < 0) { | |
f5b0cba8 | 2526 | up_read(&key->sem); |
c538f6ec | 2527 | key_put(key); |
453431a5 | 2528 | kfree_sensitive(new_key_string); |
27f5411a | 2529 | return ret; |
c538f6ec OK |
2530 | } |
2531 | ||
f5b0cba8 | 2532 | up_read(&key->sem); |
c538f6ec OK |
2533 | key_put(key); |
2534 | ||
2535 | /* clear the flag, since the following operations may invalidate the previously valid key */
2536 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); | |
2537 | ||
2538 | ret = crypt_setkey(cc); | |
2539 | ||
c538f6ec OK |
2540 | if (!ret) { |
2541 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); | |
453431a5 | 2542 | kfree_sensitive(cc->key_string); |
c538f6ec OK |
2543 | cc->key_string = new_key_string; |
2544 | } else | |
453431a5 | 2545 | kfree_sensitive(new_key_string); |
c538f6ec OK |
2546 | |
2547 | return ret; | |
2548 | } | |
2549 | ||
2550 | static int get_key_size(char **key_string) | |
2551 | { | |
2552 | char *colon, dummy; | |
2553 | int ret; | |
2554 | ||
2555 | if (*key_string[0] != ':') | |
2556 | return strlen(*key_string) >> 1; | |
2557 | ||
2558 | /* look for next ':' in key string */ | |
2559 | colon = strpbrk(*key_string + 1, ":"); | |
2560 | if (!colon) | |
2561 | return -EINVAL; | |
2562 | ||
2563 | if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':') | |
2564 | return -EINVAL; | |
2565 | ||
2566 | *key_string = colon; | |
2567 | ||
2568 | /* remaining key string should be :<logon|user>:<key_desc> */ | |
2569 | ||
2570 | return ret; | |
2571 | } | |
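/*
 * Worked example (illustrative): for the mapping-table key field
 * ":32:logon:my_prefix:my_key" (hypothetical description), get_key_size()
 * parses the leading ":32", returns 32, and leaves *key_string pointing
 * at ":logon:my_prefix:my_key" for the keyring lookup above. A plain hex
 * key such as "deadbeef" has no leading ':' and simply yields
 * strlen("deadbeef") / 2 = 4 key bytes.
 */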
2572 | ||
2573 | #else | |
2574 | ||
2575 | static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string) | |
2576 | { | |
2577 | return -EINVAL; | |
2578 | } | |
2579 | ||
2580 | static int get_key_size(char **key_string) | |
2581 | { | |
2582 | return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1; | |
2583 | } | |
2584 | ||
27f5411a | 2585 | #endif /* CONFIG_KEYS */ |
c538f6ec | 2586 | |
e48d4bbf MB |
2587 | static int crypt_set_key(struct crypt_config *cc, char *key) |
2588 | { | |
de8be5ac MB |
2589 | int r = -EINVAL; |
2590 | int key_string_len = strlen(key); | |
2591 | ||
69a8cfcd MB |
2592 | /* Hyphen (which gives a key_size of zero) means there is no key. */ |
2593 | if (!cc->key_size && strcmp(key, "-")) | |
de8be5ac | 2594 | goto out; |
e48d4bbf | 2595 | |
c538f6ec OK |
2596 | /* ':' means the key is in kernel keyring, short-circuit normal key processing */ |
2597 | if (key[0] == ':') { | |
2598 | r = crypt_set_keyring_key(cc, key + 1); | |
de8be5ac | 2599 | goto out; |
c538f6ec | 2600 | } |
e48d4bbf | 2601 | |
265e9098 OK |
2602 | /* clear the flag, since the following operations may invalidate the previously valid key */
2603 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); | |
e48d4bbf | 2604 | |
c538f6ec | 2605 | /* wipe references to any kernel keyring key */ |
453431a5 | 2606 | kfree_sensitive(cc->key_string); |
c538f6ec OK |
2607 | cc->key_string = NULL; |
2608 | ||
e944e03e AS |
2609 | /* Decode key from its hex representation. */ |
2610 | if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0) | |
de8be5ac | 2611 | goto out; |
e48d4bbf | 2612 | |
671ea6b4 | 2613 | r = crypt_setkey(cc); |
265e9098 OK |
2614 | if (!r) |
2615 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); | |
de8be5ac MB |
2616 | |
2617 | out: | |
2618 | /* Hex key string not needed after here, so wipe it. */ | |
2619 | memset(key, '0', key_string_len); | |
2620 | ||
2621 | return r; | |
e48d4bbf MB |
2622 | } |
2623 | ||
2624 | static int crypt_wipe_key(struct crypt_config *cc) | |
2625 | { | |
c82feeec OK |
2626 | int r; |
2627 | ||
e48d4bbf | 2628 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); |
c82feeec | 2629 | get_random_bytes(&cc->key, cc->key_size); |
4a52ffc7 MB |
2630 | |
2631 | /* Wipe IV private keys */ | |
2632 | if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { | |
2633 | r = cc->iv_gen_ops->wipe(cc); | |
2634 | if (r) | |
2635 | return r; | |
2636 | } | |
2637 | ||
453431a5 | 2638 | kfree_sensitive(cc->key_string); |
c538f6ec | 2639 | cc->key_string = NULL; |
c82feeec OK |
2640 | r = crypt_setkey(cc); |
2641 | memset(&cc->key, 0, cc->key_size * sizeof(u8)); | |
c0297721 | 2642 | |
c82feeec | 2643 | return r; |
e48d4bbf MB |
2644 | } |
2645 | ||
5059353d MP |
2646 | static void crypt_calculate_pages_per_client(void) |
2647 | { | |
ca79b0c2 | 2648 | unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100; |
5059353d MP |
2649 | |
2650 | if (!dm_crypt_clients_n) | |
2651 | return; | |
2652 | ||
2653 | pages /= dm_crypt_clients_n; | |
2654 | if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT) | |
2655 | pages = DM_CRYPT_MIN_PAGES_PER_CLIENT; | |
2656 | dm_crypt_pages_per_client = pages; | |
2657 | } | |
2658 | ||
2659 | static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data) | |
2660 | { | |
2661 | struct crypt_config *cc = pool_data; | |
2662 | struct page *page; | |
2663 | ||
2664 | if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) && | |
2665 | likely(gfp_mask & __GFP_NORETRY)) | |
2666 | return NULL; | |
2667 | ||
2668 | page = alloc_page(gfp_mask); | |
2669 | if (likely(page != NULL)) | |
2670 | percpu_counter_add(&cc->n_allocated_pages, 1); | |
2671 | ||
2672 | return page; | |
2673 | } | |
2674 | ||
2675 | static void crypt_page_free(void *page, void *pool_data) | |
2676 | { | |
2677 | struct crypt_config *cc = pool_data; | |
2678 | ||
2679 | __free_page(page); | |
2680 | percpu_counter_sub(&cc->n_allocated_pages, 1); | |
2681 | } | |
2682 | ||
28513fcc MB |
2683 | static void crypt_dtr(struct dm_target *ti) |
2684 | { | |
2685 | struct crypt_config *cc = ti->private; | |
2686 | ||
2687 | ti->private = NULL; | |
2688 | ||
2689 | if (!cc) | |
2690 | return; | |
2691 | ||
f659b100 | 2692 | if (cc->write_thread) |
dc267621 MP |
2693 | kthread_stop(cc->write_thread); |
2694 | ||
28513fcc MB |
2695 | if (cc->io_queue) |
2696 | destroy_workqueue(cc->io_queue); | |
2697 | if (cc->crypt_queue) | |
2698 | destroy_workqueue(cc->crypt_queue); | |
2699 | ||
fd2d231f MP |
2700 | crypt_free_tfms(cc); |
2701 | ||
6f1c819c | 2702 | bioset_exit(&cc->bs); |
28513fcc | 2703 | |
6f1c819c KO |
2704 | mempool_exit(&cc->page_pool); |
2705 | mempool_exit(&cc->req_pool); | |
2706 | mempool_exit(&cc->tag_pool); | |
2707 | ||
d00a11df KO |
2708 | WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0); |
2709 | percpu_counter_destroy(&cc->n_allocated_pages); | |
2710 | ||
28513fcc MB |
2711 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
2712 | cc->iv_gen_ops->dtr(cc); | |
2713 | ||
28513fcc MB |
2714 | if (cc->dev) |
2715 | dm_put_device(ti, cc->dev); | |
2716 | ||
453431a5 WL |
2717 | kfree_sensitive(cc->cipher_string); |
2718 | kfree_sensitive(cc->key_string); | |
2719 | kfree_sensitive(cc->cipher_auth); | |
2720 | kfree_sensitive(cc->authenc_key); | |
28513fcc | 2721 | |
d5ffebdd MS |
2722 | mutex_destroy(&cc->bio_alloc_lock); |
2723 | ||
28513fcc | 2724 | /* Must zero key material before freeing */ |
453431a5 | 2725 | kfree_sensitive(cc); |
5059353d MP |
2726 | |
2727 | spin_lock(&dm_crypt_clients_lock); | |
2728 | WARN_ON(!dm_crypt_clients_n); | |
2729 | dm_crypt_clients_n--; | |
2730 | crypt_calculate_pages_per_client(); | |
2731 | spin_unlock(&dm_crypt_clients_lock); | |
28513fcc MB |
2732 | } |
2733 | ||
e889f97a MB |
2734 | static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode) |
2735 | { | |
2736 | struct crypt_config *cc = ti->private; | |
2737 | ||
33d2f09f | 2738 | if (crypt_integrity_aead(cc)) |
e889f97a MB |
2739 | cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc)); |
2740 | else | |
2741 | cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); | |
2742 | ||
e889f97a MB |
2743 | if (cc->iv_size) |
2744 | /* at least a 64-bit sector number should fit in our buffer */
2745 | cc->iv_size = max(cc->iv_size, | |
2746 | (unsigned int)(sizeof(u64) / sizeof(u8))); | |
2747 | else if (ivmode) { | |
2748 | DMWARN("Selected cipher does not support IVs"); | |
2749 | ivmode = NULL; | |
2750 | } | |
2751 | ||
2752 | /* Choose ivmode, see comments at iv code. */ | |
2753 | if (ivmode == NULL) | |
2754 | cc->iv_gen_ops = NULL; | |
2755 | else if (strcmp(ivmode, "plain") == 0) | |
2756 | cc->iv_gen_ops = &crypt_iv_plain_ops; | |
2757 | else if (strcmp(ivmode, "plain64") == 0) | |
2758 | cc->iv_gen_ops = &crypt_iv_plain64_ops; | |
7e3fd855 MB |
2759 | else if (strcmp(ivmode, "plain64be") == 0) |
2760 | cc->iv_gen_ops = &crypt_iv_plain64be_ops; | |
e889f97a MB |
2761 | else if (strcmp(ivmode, "essiv") == 0) |
2762 | cc->iv_gen_ops = &crypt_iv_essiv_ops; | |
2763 | else if (strcmp(ivmode, "benbi") == 0) | |
2764 | cc->iv_gen_ops = &crypt_iv_benbi_ops; | |
2765 | else if (strcmp(ivmode, "null") == 0) | |
2766 | cc->iv_gen_ops = &crypt_iv_null_ops; | |
b9411d73 MB |
2767 | else if (strcmp(ivmode, "eboiv") == 0) |
2768 | cc->iv_gen_ops = &crypt_iv_eboiv_ops; | |
bbb16584 MB |
2769 | else if (strcmp(ivmode, "elephant") == 0) { |
2770 | cc->iv_gen_ops = &crypt_iv_elephant_ops; | |
2771 | cc->key_parts = 2; | |
2772 | cc->key_extra_size = cc->key_size / 2; | |
2773 | if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE) | |
2774 | return -EINVAL; | |
2775 | set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags); | |
2776 | } else if (strcmp(ivmode, "lmk") == 0) { | |
e889f97a MB |
2777 | cc->iv_gen_ops = &crypt_iv_lmk_ops; |
2778 | /* | |
2779 | * Versions 2 and 3 are recognised according
2780 | * to the length of the provided multi-key string.
2781 | * If present (version 3), the last key is used as the IV seed.
2782 | * All keys (including IV seed) are always the same size. | |
2783 | */ | |
2784 | if (cc->key_size % cc->key_parts) { | |
2785 | cc->key_parts++; | |
2786 | cc->key_extra_size = cc->key_size / cc->key_parts; | |
2787 | } | |
2788 | } else if (strcmp(ivmode, "tcw") == 0) { | |
2789 | cc->iv_gen_ops = &crypt_iv_tcw_ops; | |
2790 | cc->key_parts += 2; /* IV + whitening */ | |
2791 | cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE; | |
2792 | } else if (strcmp(ivmode, "random") == 0) { | |
2793 | cc->iv_gen_ops = &crypt_iv_random_ops; | |
2794 | /* Need storage space in integrity fields. */ | |
2795 | cc->integrity_iv_size = cc->iv_size; | |
2796 | } else { | |
2797 | ti->error = "Invalid IV mode"; | |
2798 | return -EINVAL; | |
2799 | } | |
2800 | ||
2801 | return 0; | |
2802 | } | |
2803 | ||
33d2f09f MB |
2804 | /* |
2805 | * Workaround to parse the HMAC algorithm out of an AEAD crypto API spec.
2806 | * The HMAC is needed to calculate the tag size (HMAC digest size).
2807 | * This should probably be done by crypto API calls (once available...).
2808 | */ | |
2809 | static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api) | |
2810 | { | |
2811 | char *start, *end, *mac_alg = NULL; | |
2812 | struct crypto_ahash *mac; | |
2813 | ||
2814 | if (!strstarts(cipher_api, "authenc(")) | |
2815 | return 0; | |
2816 | ||
2817 | start = strchr(cipher_api, '('); | |
2818 | end = strchr(cipher_api, ','); | |
2819 | if (!start || !end || ++start > end) | |
2820 | return -EINVAL; | |
2821 | ||
2822 | mac_alg = kzalloc(end - start + 1, GFP_KERNEL); | |
2823 | if (!mac_alg) | |
2824 | return -ENOMEM; | |
2825 | strncpy(mac_alg, start, end - start); | |
2826 | ||
cd746938 | 2827 | mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY); |
33d2f09f MB |
2828 | kfree(mac_alg); |
2829 | ||
2830 | if (IS_ERR(mac)) | |
2831 | return PTR_ERR(mac); | |
2832 | ||
2833 | cc->key_mac_size = crypto_ahash_digestsize(mac); | |
2834 | crypto_free_ahash(mac); | |
2835 | ||
2836 | cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL); | |
2837 | if (!cc->authenc_key) | |
2838 | return -ENOMEM; | |
2839 | ||
2840 | return 0; | |
2841 | } | |
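/*
 * Worked example (illustrative): for cipher_api
 * "authenc(hmac(sha256),cbc(aes))", start and end bracket the first
 * argument, so mac_alg becomes "hmac(sha256)". The ahash is allocated
 * only to read crypto_ahash_digestsize() = 32 bytes, which then sizes
 * the HMAC portion of the authenc key (cc->key_mac_size).
 */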
2842 | ||
2843 | static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key, | |
2844 | char **ivmode, char **ivopts) | |
2845 | { | |
2846 | struct crypt_config *cc = ti->private; | |
a1a262b6 | 2847 | char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME]; |
33d2f09f MB |
2848 | int ret = -EINVAL; |
2849 | ||
2850 | cc->tfms_count = 1; | |
2851 | ||
2852 | /* | |
2853 | * New format (capi: prefix) | |
2854 | * capi:cipher_api_spec-iv:ivopts | |
2855 | */ | |
2856 | tmp = &cipher_in[strlen("capi:")]; | |
1856b9f7 MB |
2857 | |
2858 | /* Separate IV options if present, it can contain another '-' in hash name */ | |
2859 | *ivopts = strrchr(tmp, ':'); | |
2860 | if (*ivopts) { | |
2861 | **ivopts = '\0'; | |
2862 | (*ivopts)++; | |
2863 | } | |
2864 | /* Parse IV mode */ | |
2865 | *ivmode = strrchr(tmp, '-'); | |
2866 | if (*ivmode) { | |
2867 | **ivmode = '\0'; | |
2868 | (*ivmode)++; | |
2869 | } | |
2870 | /* The rest is crypto API spec */ | |
2871 | cipher_api = tmp; | |
33d2f09f | 2872 | |
a1a262b6 AB |
2873 | /* Alloc AEAD, can be used only in new format. */ |
2874 | if (crypt_integrity_aead(cc)) { | |
2875 | ret = crypt_ctr_auth_cipher(cc, cipher_api); | |
2876 | if (ret < 0) { | |
2877 | ti->error = "Invalid AEAD cipher spec"; | |
2878 | return -ENOMEM; | |
2879 | } | |
2880 | } | |
2881 | ||
33d2f09f MB |
2882 | if (*ivmode && !strcmp(*ivmode, "lmk")) |
2883 | cc->tfms_count = 64; | |
2884 | ||
a1a262b6 AB |
2885 | if (*ivmode && !strcmp(*ivmode, "essiv")) { |
2886 | if (!*ivopts) { | |
2887 | ti->error = "Digest algorithm missing for ESSIV mode"; | |
2888 | return -EINVAL; | |
2889 | } | |
2890 | ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)", | |
2891 | cipher_api, *ivopts); | |
2892 | if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) { | |
2893 | ti->error = "Cannot allocate cipher string"; | |
2894 | return -ENOMEM; | |
2895 | } | |
2896 | cipher_api = buf; | |
2897 | } | |
2898 | ||
33d2f09f MB |
2899 | cc->key_parts = cc->tfms_count; |
2900 | ||
2901 | /* Allocate cipher */ | |
2902 | ret = crypt_alloc_tfms(cc, cipher_api); | |
2903 | if (ret < 0) { | |
2904 | ti->error = "Error allocating crypto tfm"; | |
2905 | return ret; | |
2906 | } | |
2907 | ||
a1a262b6 | 2908 | if (crypt_integrity_aead(cc)) |
33d2f09f | 2909 | cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc)); |
a1a262b6 | 2910 | else |
33d2f09f MB |
2911 | cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); |
2912 | ||
33d2f09f MB |
2913 | return 0; |
2914 | } | |
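/*
 * Worked example (illustrative): the table spec
 * "capi:authenc(hmac(sha256),cbc(aes))-essiv:sha256" splits above into
 * *ivopts = "sha256", *ivmode = "essiv" and
 * cipher_api = "authenc(hmac(sha256),cbc(aes))"; the essiv branch then
 * rewrites cipher_api to "essiv(authenc(hmac(sha256),cbc(aes)),sha256)"
 * before the tfm is allocated.
 */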
2915 | ||
2916 | static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key, | |
2917 | char **ivmode, char **ivopts) | |
1da177e4 | 2918 | { |
5ebaee6d | 2919 | struct crypt_config *cc = ti->private; |
33d2f09f | 2920 | char *tmp, *cipher, *chainmode, *keycount; |
5ebaee6d | 2921 | char *cipher_api = NULL; |
fd2d231f | 2922 | int ret = -EINVAL; |
31998ef1 | 2923 | char dummy; |
1da177e4 | 2924 | |
33d2f09f | 2925 | if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) { |
5ebaee6d | 2926 | ti->error = "Bad cipher specification"; |
1da177e4 LT |
2927 | return -EINVAL; |
2928 | } | |
2929 | ||
5ebaee6d MB |
2930 | /* |
2931 | * Legacy dm-crypt cipher specification | |
d1f96423 | 2932 | * cipher[:keycount]-mode-iv:ivopts |
5ebaee6d MB |
2933 | */ |
2934 | tmp = cipher_in; | |
d1f96423 MB |
2935 | keycount = strsep(&tmp, "-"); |
2936 | cipher = strsep(&keycount, ":"); | |
2937 | ||
2938 | if (!keycount) | |
2939 | cc->tfms_count = 1; | |
31998ef1 | 2940 | else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 || |
d1f96423 MB |
2941 | !is_power_of_2(cc->tfms_count)) { |
2942 | ti->error = "Bad cipher key count specification"; | |
2943 | return -EINVAL; | |
2944 | } | |
2945 | cc->key_parts = cc->tfms_count; | |
5ebaee6d | 2946 | |
1da177e4 | 2947 | chainmode = strsep(&tmp, "-"); |
1856b9f7 MB |
2948 | *ivmode = strsep(&tmp, ":"); |
2949 | *ivopts = tmp; | |
1da177e4 | 2950 | |
7dbcd137 MB |
2951 | /* |
2952 | * For compatibility with the original dm-crypt mapping format, if | |
2953 | * only the cipher name is supplied, use cbc-plain. | |
2954 | */ | |
33d2f09f | 2955 | if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) { |
1da177e4 | 2956 | chainmode = "cbc"; |
33d2f09f | 2957 | *ivmode = "plain"; |
1da177e4 LT |
2958 | } |
2959 | ||
33d2f09f | 2960 | if (strcmp(chainmode, "ecb") && !*ivmode) { |
5ebaee6d MB |
2961 | ti->error = "IV mechanism required"; |
2962 | return -EINVAL; | |
1da177e4 LT |
2963 | } |
2964 | ||
5ebaee6d MB |
2965 | cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL); |
2966 | if (!cipher_api) | |
2967 | goto bad_mem; | |
2968 | ||
a1a262b6 AB |
2969 | if (*ivmode && !strcmp(*ivmode, "essiv")) { |
2970 | if (!*ivopts) { | |
2971 | ti->error = "Digest algorithm missing for ESSIV mode"; | |
2972 | kfree(cipher_api); | |
2973 | return -EINVAL; | |
2974 | } | |
2975 | ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, | |
2976 | "essiv(%s(%s),%s)", chainmode, cipher, *ivopts); | |
2977 | } else { | |
2978 | ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, | |
2979 | "%s(%s)", chainmode, cipher); | |
2980 | } | |
2981 | if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) { | |
5ebaee6d MB |
2982 | kfree(cipher_api); |
2983 | goto bad_mem; | |
1da177e4 LT |
2984 | } |
2985 | ||
5ebaee6d | 2986 | /* Allocate cipher */ |
fd2d231f MP |
2987 | ret = crypt_alloc_tfms(cc, cipher_api); |
2988 | if (ret < 0) { | |
2989 | ti->error = "Error allocating crypto tfm"; | |
33d2f09f MB |
2990 | kfree(cipher_api); |
2991 | return ret; | |
1da177e4 | 2992 | } |
bd86e320 | 2993 | kfree(cipher_api); |
1da177e4 | 2994 | |
33d2f09f MB |
2995 | return 0; |
2996 | bad_mem: | |
2997 | ti->error = "Cannot allocate cipher strings"; | |
2998 | return -ENOMEM; | |
2999 | } | |
5ebaee6d | 3000 | |
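crypt_ctr_cipher_old() above tokenizes the legacy cipher[:keycount]-mode-iv:ivopts string with strsep(). A self-contained userspace sketch of that walk, assuming glibc; parse_legacy_spec() is an illustrative name, not a kernel symbol:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

/* Illustrative analogue of the legacy tokenizer.
 * Splits "cipher[:keycount]-mode-iv:ivopts" in place. */
static void parse_legacy_spec(char *spec)
{
	char *tmp = spec;
	char *keycount, *cipher, *chainmode, *ivmode, *ivopts;

	keycount = strsep(&tmp, "-");      /* "aes:64" or just "aes" */
	cipher = strsep(&keycount, ":");   /* keycount now "64" or NULL */
	chainmode = strsep(&tmp, "-");     /* "cbc", "xts", ... */
	ivmode = strsep(&tmp, ":");        /* "essiv", "plain64", ... */
	ivopts = tmp;                      /* whatever follows the ':' */

	/* Compatibility default: a bare cipher name means cbc-plain */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	printf("cipher=%s keycount=%s mode=%s iv=%s ivopts=%s\n",
	       cipher, keycount ? keycount : "1", chainmode,
	       ivmode ? ivmode : "-", ivopts ? ivopts : "-");
}

int main(void)
{
	char a[] = "aes:64-cbc-essiv:sha256";
	char b[] = "aes-xts-plain64";

	parse_legacy_spec(a); /* cipher=aes keycount=64 mode=cbc iv=essiv ivopts=sha256 */
	parse_legacy_spec(b); /* cipher=aes keycount=1 mode=xts iv=plain64 ivopts=- */
	return 0;
}
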
33d2f09f MB |
3001 | static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key) |
3002 | { | |
3003 | struct crypt_config *cc = ti->private; | |
3004 | char *ivmode = NULL, *ivopts = NULL; | |
3005 | int ret; | |
3006 | ||
3007 | cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL); | |
3008 | if (!cc->cipher_string) { | |
3009 | ti->error = "Cannot allocate cipher strings"; | |
3010 | return -ENOMEM; | |
1da177e4 LT |
3011 | } |
3012 | ||
33d2f09f MB |
3013 | if (strstarts(cipher_in, "capi:")) |
3014 | ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts); | |
3015 | else | |
3016 | ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts); | |
3017 | if (ret) | |
3018 | return ret; | |
3019 | ||
5ebaee6d | 3020 | /* Initialize IV */ |
e889f97a MB |
3021 | ret = crypt_ctr_ivmode(ti, ivmode); |
3022 | if (ret < 0) | |
33d2f09f | 3023 | return ret; |
1da177e4 | 3024 | |
da31a078 MB |
3025 | /* Initialize and set key */ |
3026 | ret = crypt_set_key(cc, key); | |
3027 | if (ret < 0) { | |
3028 | ti->error = "Error decoding and setting key"; | |
33d2f09f | 3029 | return ret; |
da31a078 MB |
3030 | } |
3031 | ||
28513fcc MB |
3032 | /* Allocate IV */ |
3033 | if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) { | |
3034 | ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); | |
3035 | if (ret < 0) { | |
3036 | ti->error = "Error creating IV"; | |
33d2f09f | 3037 | return ret; |
28513fcc MB |
3038 | } |
3039 | } | |
1da177e4 | 3040 | |
28513fcc MB |
3041 | /* Initialize IV (set keys for ESSIV etc) */ |
3042 | if (cc->iv_gen_ops && cc->iv_gen_ops->init) { | |
3043 | ret = cc->iv_gen_ops->init(cc); | |
3044 | if (ret < 0) { | |
3045 | ti->error = "Error initialising IV"; | |
33d2f09f | 3046 | return ret; |
28513fcc | 3047 | } |
b95bf2d3 MB |
3048 | } |
3049 | ||
dc94902b OK |
3050 | /* wipe the kernel key payload copy */ |
3051 | if (cc->key_string) | |
3052 | memset(cc->key, 0, cc->key_size * sizeof(u8)); | |
3053 | ||
5ebaee6d | 3054 | return ret; |
5ebaee6d | 3055 | } |
5ebaee6d | 3056 | |
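crypt_ctr_cipher() dispatches on nothing more than the capi: prefix. A sketch of that test; in the kernel, strstarts() is just a strncmp() over the prefix length, and both table strings below are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same semantics as the kernel's strstarts() helper */
static bool strstarts(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	const char *old_fmt = "aes-xts-plain64";
	const char *new_fmt = "capi:xts(aes)-plain64";

	printf("%s -> %s parser\n", old_fmt,
	       strstarts(old_fmt, "capi:") ? "new" : "old");
	printf("%s -> %s parser\n", new_fmt,
	       strstarts(new_fmt, "capi:") ? "new" : "old");
	return 0;
}
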
ef43aa38 MB |
3057 | static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv) |
3058 | { | |
3059 | struct crypt_config *cc = ti->private; | |
3060 | struct dm_arg_set as; | |
5916a22b | 3061 | static const struct dm_arg _args[] = { |
39d42fa9 | 3062 | {0, 8, "Invalid number of feature args"}, |
ef43aa38 MB |
3063 | }; |
3064 | unsigned int opt_params, val; | |
3065 | const char *opt_string, *sval; | |
8f0009a2 | 3066 | char dummy; |
ef43aa38 MB |
3067 | int ret; |
3068 | ||
3069 | /* Optional parameters */ | |
3070 | as.argc = argc; | |
3071 | as.argv = argv; | |
3072 | ||
3073 | ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); | |
3074 | if (ret) | |
3075 | return ret; | |
3076 | ||
3077 | while (opt_params--) { | |
3078 | opt_string = dm_shift_arg(&as); | |
3079 | if (!opt_string) { | |
3080 | ti->error = "Not enough feature arguments"; | |
3081 | return -EINVAL; | |
3082 | } | |
3083 | ||
3084 | if (!strcasecmp(opt_string, "allow_discards")) | |
3085 | ti->num_discard_bios = 1; | |
3086 | ||
3087 | else if (!strcasecmp(opt_string, "same_cpu_crypt")) | |
3088 | set_bit(DM_CRYPT_SAME_CPU, &cc->flags); | |
3089 | ||
3090 | else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) | |
3091 | set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); | |
39d42fa9 IK |
3092 | else if (!strcasecmp(opt_string, "no_read_workqueue")) |
3093 | set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags); | |
3094 | else if (!strcasecmp(opt_string, "no_write_workqueue")) | |
3095 | set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); | |
ef43aa38 MB |
3096 | else if (sscanf(opt_string, "integrity:%u:", &val) == 1) { |
3097 | if (val == 0 || val > MAX_TAG_SIZE) { | |
3098 | ti->error = "Invalid integrity arguments"; | |
3099 | return -EINVAL; | |
3100 | } | |
3101 | cc->on_disk_tag_size = val; | |
3102 | sval = strchr(opt_string + strlen("integrity:"), ':') + 1; | |
3103 | if (!strcasecmp(sval, "aead")) { | |
3104 | set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); | |
ef43aa38 MB |
3105 | } else if (strcasecmp(sval, "none")) { |
3106 | ti->error = "Unknown integrity profile"; | |
3107 | return -EINVAL; | |
3108 | } | |
3109 | ||
3110 | cc->cipher_auth = kstrdup(sval, GFP_KERNEL); | |
3111 | if (!cc->cipher_auth) | |
3112 | return -ENOMEM; | |
ff3af92b | 3113 | } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) { |
8f0009a2 MB |
3114 | if (cc->sector_size < (1 << SECTOR_SHIFT) || |
3115 | cc->sector_size > 4096 || | |
ff3af92b | 3116 | (cc->sector_size & (cc->sector_size - 1))) { |
8f0009a2 MB |
3117 | ti->error = "Invalid feature value for sector_size"; |
3118 | return -EINVAL; | |
3119 | } | |
783874b0 MB |
3120 | if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) { |
3121 | ti->error = "Device size is not multiple of sector_size feature"; | |
3122 | return -EINVAL; | |
3123 | } | |
ff3af92b | 3124 | cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; |
8f0009a2 MB |
3125 | } else if (!strcasecmp(opt_string, "iv_large_sectors")) |
3126 | set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); | |
3127 | else { | |
ef43aa38 MB |
3128 | ti->error = "Invalid feature arguments"; |
3129 | return -EINVAL; | |
3130 | } | |
3131 | } | |
3132 | ||
3133 | return 0; | |
5ebaee6d MB |
3134 | } |
3135 | ||
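The sector_size feature checks above boil down to: at least 512 bytes, at most 4096, and a power of two (plus a separate check that it divides the target length evenly). A userspace sketch of the size check and shift computation; valid_sector_size() is illustrative, and __builtin_ctz() stands in for the kernel's __ffs():

#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SHIFT 9   /* 512-byte kernel sectors */

/* Illustrative analogue of the sector_size feature validation */
static bool valid_sector_size(unsigned int n)
{
	return n >= (1u << SECTOR_SHIFT) && n <= 4096 &&
	       (n & (n - 1)) == 0;   /* power-of-two test */
}

int main(void)
{
	unsigned int sizes[] = { 512, 1024, 3072, 4096, 8192 };

	for (unsigned int i = 0; i < 5; i++) {
		unsigned int n = sizes[i];

		if (valid_sector_size(n))
			/* __builtin_ctz(n) plays the role of __ffs(n) here */
			printf("%u: ok, sector_shift=%d\n",
			       n, __builtin_ctz(n) - SECTOR_SHIFT);
		else
			printf("%u: rejected\n", n);
	}
	return 0;
}
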
8e225f04 | 3136 | #ifdef CONFIG_BLK_DEV_ZONED |
8e225f04 DLM |
3137 | static int crypt_report_zones(struct dm_target *ti, |
3138 | struct dm_report_zones_args *args, unsigned int nr_zones) | |
3139 | { | |
3140 | struct crypt_config *cc = ti->private; | |
3141 | sector_t sector = cc->start + dm_target_offset(ti, args->next_sector); | |
3142 | ||
3143 | args->start = cc->start; | |
3144 | return blkdev_report_zones(cc->dev->bdev, sector, nr_zones, | |
3145 | dm_report_zones_cb, args); | |
3146 | } | |
e3290b94 MS |
3147 | #else |
3148 | #define crypt_report_zones NULL | |
8e225f04 DLM |
3149 | #endif |
3150 | ||
5ebaee6d MB |
3151 | /* |
3152 | * Construct an encryption mapping: | |
c538f6ec | 3153 | * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start> |
5ebaee6d MB |
3154 | */ |
3155 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
3156 | { | |
3157 | struct crypt_config *cc; | |
ed0302e8 | 3158 | const char *devname = dm_table_device_name(ti->table); |
c538f6ec | 3159 | int key_size; |
ef43aa38 | 3160 | unsigned int align_mask; |
5ebaee6d MB |
3161 | unsigned long long tmpll; |
3162 | int ret; | |
ef43aa38 | 3163 | size_t iv_size_padding, additional_req_size; |
31998ef1 | 3164 | char dummy; |
772ae5f5 | 3165 | |
772ae5f5 | 3166 | if (argc < 5) { |
5ebaee6d MB |
3167 | ti->error = "Not enough arguments"; |
3168 | return -EINVAL; | |
1da177e4 LT |
3169 | } |
3170 | ||
c538f6ec OK |
3171 | key_size = get_key_size(&argv[1]); |
3172 | if (key_size < 0) { | |
3173 | ti->error = "Cannot parse key size"; | |
3174 | return -EINVAL; | |
3175 | } | |
5ebaee6d | 3176 | |
9c81c99b | 3177 | cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL); |
5ebaee6d MB |
3178 | if (!cc) { |
3179 | ti->error = "Cannot allocate encryption context"; | |
3180 | return -ENOMEM; | |
3181 | } | |
69a8cfcd | 3182 | cc->key_size = key_size; |
8f0009a2 | 3183 | cc->sector_size = (1 << SECTOR_SHIFT); |
ff3af92b | 3184 | cc->sector_shift = 0; |
5ebaee6d MB |
3185 | |
3186 | ti->private = cc; | |
ef43aa38 | 3187 | |
5059353d MP |
3188 | spin_lock(&dm_crypt_clients_lock); |
3189 | dm_crypt_clients_n++; | |
3190 | crypt_calculate_pages_per_client(); | |
3191 | spin_unlock(&dm_crypt_clients_lock); | |
3192 | ||
3193 | ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL); | |
3194 | if (ret < 0) | |
3195 | goto bad; | |
3196 | ||
ef43aa38 MB |
3197 | /* Optional parameters need to be read before cipher constructor */ |
3198 | if (argc > 5) { | |
3199 | ret = crypt_ctr_optional(ti, argc - 5, &argv[5]); | |
3200 | if (ret) | |
3201 | goto bad; | |
3202 | } | |
3203 | ||
5ebaee6d MB |
3204 | ret = crypt_ctr_cipher(ti, argv[0], argv[1]); |
3205 | if (ret < 0) | |
3206 | goto bad; | |
3207 | ||
33d2f09f | 3208 | if (crypt_integrity_aead(cc)) { |
ef43aa38 MB |
3209 | cc->dmreq_start = sizeof(struct aead_request); |
3210 | cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc)); | |
3211 | align_mask = crypto_aead_alignmask(any_tfm_aead(cc)); | |
3212 | } else { | |
3213 | cc->dmreq_start = sizeof(struct skcipher_request); | |
3214 | cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc)); | |
3215 | align_mask = crypto_skcipher_alignmask(any_tfm(cc)); | |
3216 | } | |
d49ec52f MP |
3217 | cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); |
3218 | ||
ef43aa38 | 3219 | if (align_mask < CRYPTO_MINALIGN) { |
d49ec52f MP |
3220 | /* Allocate the padding exactly */ |
3221 | iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) | |
ef43aa38 | 3222 | & align_mask; |
d49ec52f MP |
3223 | } else { |
3224 | /* | |
3225 | * If the cipher requires greater alignment than kmalloc | |
3226 | * alignment, we don't know the exact position of the | |
3227 | * initialization vector. We must assume worst case. | |
3228 | */ | |
ef43aa38 | 3229 | iv_size_padding = align_mask; |
d49ec52f | 3230 | } |
ddd42edf | 3231 | |
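The -(start + size) & align_mask expression above is the usual idiom for the padding needed to reach the next power-of-two alignment boundary: for alignment a with mask a-1, -x & (a-1) equals (a - x % a) % a in unsigned arithmetic. A small demonstration with illustrative values:

#include <stdio.h>

/* Bytes needed to pad offset x up to the next multiple of (mask + 1) */
static unsigned long pad_to_align(unsigned long x, unsigned long mask)
{
	return -x & mask;   /* well-defined: unsigned negation wraps */
}

int main(void)
{
	/* e.g. the request ends at byte 100 and the cipher wants 16-byte IVs */
	unsigned long end = 100, mask = 15;

	printf("padding = %lu\n", pad_to_align(end, mask)); /* 12: 100+12=112 */
	printf("aligned end = %lu\n", end + pad_to_align(end, mask));
	return 0;
}
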
ef43aa38 MB |
3232 | /* ...| IV + padding | original IV | original sec. number | bio tag offset | */ |
3233 | additional_req_size = sizeof(struct dm_crypt_request) + | |
3234 | iv_size_padding + cc->iv_size + | |
3235 | cc->iv_size + | |
3236 | sizeof(uint64_t) + | |
3237 | sizeof(unsigned int); | |
3238 | ||
6f1c819c KO |
3239 | ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size); |
3240 | if (ret) { | |
ddd42edf | 3241 | ti->error = "Cannot allocate crypt request mempool"; |
28513fcc | 3242 | goto bad; |
ddd42edf | 3243 | } |
ddd42edf | 3244 | |
30187e1d | 3245 | cc->per_bio_data_size = ti->per_io_data_size = |
ef43aa38 | 3246 | ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size, |
d49ec52f | 3247 | ARCH_KMALLOC_MINALIGN); |
298a9fa0 | 3248 | |
a8affc03 | 3249 | ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc); |
6f1c819c | 3250 | if (ret) { |
72d94861 | 3251 | ti->error = "Cannot allocate page mempool"; |
28513fcc | 3252 | goto bad; |
1da177e4 LT |
3253 | } |
3254 | ||
6f1c819c KO |
3255 | ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS); |
3256 | if (ret) { | |
6a24c718 | 3257 | ti->error = "Cannot allocate crypt bioset"; |
28513fcc | 3258 | goto bad; |
6a24c718 MB |
3259 | } |
3260 | ||
7145c241 MP |
3261 | mutex_init(&cc->bio_alloc_lock); |
3262 | ||
28513fcc | 3263 | ret = -EINVAL; |
8f0009a2 MB |
3264 | if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) || |
3265 | (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) { | |
72d94861 | 3266 | ti->error = "Invalid iv_offset sector"; |
28513fcc | 3267 | goto bad; |
1da177e4 | 3268 | } |
4ee218cd | 3269 | cc->iv_offset = tmpll; |
1da177e4 | 3270 | |
e80d1c80 VG |
3271 | ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev); |
3272 | if (ret) { | |
28513fcc MB |
3273 | ti->error = "Device lookup failed"; |
3274 | goto bad; | |
3275 | } | |
3276 | ||
e80d1c80 | 3277 | ret = -EINVAL; |
ef87bfc2 | 3278 | if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { |
72d94861 | 3279 | ti->error = "Invalid device sector"; |
28513fcc | 3280 | goto bad; |
1da177e4 | 3281 | } |
4ee218cd | 3282 | cc->start = tmpll; |
1da177e4 | 3283 | |
8e225f04 DLM |
3284 | /* |
3285 | * For zoned block devices, we need to preserve the issuer write | |
3286 | * ordering. To do so, disable write workqueues and force inline | |
3287 | * encryption completion. | |
3288 | */ | |
3289 | if (bdev_is_zoned(cc->dev->bdev)) { | |
3290 | set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); | |
3291 | set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags); | |
3292 | } | |
3293 | ||
33d2f09f | 3294 | if (crypt_integrity_aead(cc) || cc->integrity_iv_size) { |
ef43aa38 | 3295 | ret = crypt_integrity_ctr(cc, ti); |
772ae5f5 MB |
3296 | if (ret) |
3297 | goto bad; | |
3298 | ||
ef43aa38 MB |
3299 | cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size; |
3300 | if (!cc->tag_pool_max_sectors) | |
3301 | cc->tag_pool_max_sectors = 1; | |
f3396c58 | 3302 | |
6f1c819c | 3303 | ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS, |
ef43aa38 | 3304 | cc->tag_pool_max_sectors * cc->on_disk_tag_size); |
6f1c819c | 3305 | if (ret) { |
ef43aa38 MB |
3306 | ti->error = "Cannot allocate integrity tags mempool"; |
3307 | goto bad; | |
772ae5f5 | 3308 | } |
583fe747 MP |
3309 | |
3310 | cc->tag_pool_max_sectors <<= cc->sector_shift; | |
772ae5f5 MB |
3311 | } |
3312 | ||
28513fcc | 3313 | ret = -ENOMEM; |
f612b213 | 3314 | cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname); |
cabf08e4 MB |
3315 | if (!cc->io_queue) { |
3316 | ti->error = "Couldn't create kcryptd io queue"; | |
28513fcc | 3317 | goto bad; |
cabf08e4 MB |
3318 | } |
3319 | ||
f3396c58 | 3320 | if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) |
48b0777c | 3321 | cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, |
ed0302e8 | 3322 | 1, devname); |
f3396c58 | 3323 | else |
48b0777c MS |
3324 | cc->crypt_queue = alloc_workqueue("kcryptd/%s", |
3325 | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, | |
ed0302e8 | 3326 | num_online_cpus(), devname); |
cabf08e4 | 3327 | if (!cc->crypt_queue) { |
9934a8be | 3328 | ti->error = "Couldn't create kcryptd queue"; |
28513fcc | 3329 | goto bad; |
9934a8be MB |
3330 | } |
3331 | ||
c7329eff | 3332 | spin_lock_init(&cc->write_thread_lock); |
b3c5fd30 | 3333 | cc->write_tree = RB_ROOT; |
dc267621 | 3334 | |
ed0302e8 | 3335 | cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname); |
dc267621 MP |
3336 | if (IS_ERR(cc->write_thread)) { |
3337 | ret = PTR_ERR(cc->write_thread); | |
3338 | cc->write_thread = NULL; | |
3339 | ti->error = "Couldn't spawn write thread"; | |
3340 | goto bad; | |
3341 | } | |
3342 | wake_up_process(cc->write_thread); | |
3343 | ||
55a62eef | 3344 | ti->num_flush_bios = 1; |
a666e5c0 | 3345 | ti->limit_swap_bios = true; |
983c7db3 | 3346 | |
1da177e4 LT |
3347 | return 0; |
3348 | ||
28513fcc MB |
3349 | bad: |
3350 | crypt_dtr(ti); | |
3351 | return ret; | |
1da177e4 LT |
3352 | } |
3353 | ||
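For reference, a constructor argument string matching the mapping format documented above crypt_ctr() can be assembled as below. This is only a userspace sketch: the device path, keyring key name, and feature args are placeholders, and in practice cryptsetup or dmsetup builds this line:

#include <stdio.h>

int main(void)
{
	char table[256];

	/* <cipher> <key> <iv_offset> <dev_path> <start> [#opt_params args...] */
	snprintf(table, sizeof(table),
		 "%s %s %llu %s %llu %d %s %s",
		 "aes-xts-plain64",
		 ":64:logon:cryptkey",        /* key held in the kernel keyring */
		 0ULL, "/dev/sda2", 0ULL,
		 2, "allow_discards", "sector_size:4096");

	/* Pass to: dmsetup create <name> --table "0 <sectors> crypt <table>" */
	printf("%s\n", table);
	return 0;
}
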
7de3ee57 | 3354 | static int crypt_map(struct dm_target *ti, struct bio *bio) |
1da177e4 | 3355 | { |
028867ac | 3356 | struct dm_crypt_io *io; |
49a8a920 | 3357 | struct crypt_config *cc = ti->private; |
647c7db1 | 3358 | |
772ae5f5 | 3359 | /* |
28a8f0d3 MC |
3360 | * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. |
3361 | * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight | |
e6047149 | 3362 | * - for REQ_OP_DISCARD caller must use flush if IO ordering matters |
772ae5f5 | 3363 | */ |
1eff9d32 | 3364 | if (unlikely(bio->bi_opf & REQ_PREFLUSH || |
28a8f0d3 | 3365 | bio_op(bio) == REQ_OP_DISCARD)) { |
74d46992 | 3366 | bio_set_dev(bio, cc->dev->bdev); |
772ae5f5 | 3367 | if (bio_sectors(bio)) |
4f024f37 KO |
3368 | bio->bi_iter.bi_sector = cc->start + |
3369 | dm_target_offset(ti, bio->bi_iter.bi_sector); | |
647c7db1 MP |
3370 | return DM_MAPIO_REMAPPED; |
3371 | } | |
1da177e4 | 3372 | |
4e870e94 MP |
3373 | /* |
3374 | * Check if bio is too large, split as needed. | |
3375 | */ | |
a8affc03 | 3376 | if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) && |
ef43aa38 | 3377 | (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size)) |
a8affc03 | 3378 | dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT)); |
4e870e94 | 3379 | |
8f0009a2 MB |
3380 | /* |
3381 | * Ensure that the bio is a multiple of the internal encryption sector
3382 | * size and that it is aligned to this size, as advertised in the IO hints.
3383 | */ | |
3384 | if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0)) | |
846785e6 | 3385 | return DM_MAPIO_KILL; |
8f0009a2 MB |
3386 | |
3387 | if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1))) | |
846785e6 | 3388 | return DM_MAPIO_KILL; |
8f0009a2 | 3389 | |
298a9fa0 MP |
3390 | io = dm_per_bio_data(bio, cc->per_bio_data_size); |
3391 | crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); | |
ef43aa38 MB |
3392 | |
3393 | if (cc->on_disk_tag_size) { | |
583fe747 | 3394 | unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift); |
ef43aa38 MB |
3395 | |
3396 | if (unlikely(tag_len > KMALLOC_MAX_SIZE) || | |
583fe747 | 3397 | unlikely(!(io->integrity_metadata = kmalloc(tag_len, |
ef43aa38 MB |
3398 | GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) { |
3399 | if (bio_sectors(bio) > cc->tag_pool_max_sectors) | |
3400 | dm_accept_partial_bio(bio, cc->tag_pool_max_sectors); | |
6f1c819c | 3401 | io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO); |
ef43aa38 MB |
3402 | io->integrity_metadata_from_pool = true; |
3403 | } | |
3404 | } | |
3405 | ||
33d2f09f | 3406 | if (crypt_integrity_aead(cc)) |
ef43aa38 MB |
3407 | io->ctx.r.req_aead = (struct aead_request *)(io + 1); |
3408 | else | |
3409 | io->ctx.r.req = (struct skcipher_request *)(io + 1); | |
cabf08e4 | 3410 | |
20c82538 MB |
3411 | if (bio_data_dir(io->base_bio) == READ) { |
3412 | if (kcryptd_io_read(io, GFP_NOWAIT)) | |
dc267621 | 3413 | kcryptd_queue_read(io); |
20c82538 | 3414 | } else |
cabf08e4 | 3415 | kcryptd_queue_crypt(io); |
1da177e4 | 3416 | |
d2a7ad29 | 3417 | return DM_MAPIO_SUBMITTED; |
1da177e4 LT |
3418 | } |
3419 | ||
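The two DM_MAPIO_KILL checks in crypt_map() above are plain mask tests against the internal encryption sector size. A userspace sketch with illustrative bio fields:

#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Illustrative stand-in for the two DM_MAPIO_KILL checks in crypt_map() */
static bool bio_acceptable(unsigned long long bi_sector,
			   unsigned int bi_size, unsigned int sector_size)
{
	unsigned int sectors_per_crypt = sector_size >> SECTOR_SHIFT;

	if (bi_sector & (sectors_per_crypt - 1))   /* start misaligned */
		return false;
	if (bi_size & (sector_size - 1))           /* length not a multiple */
		return false;
	return true;
}

int main(void)
{
	/* 4096-byte encryption sectors: starts must be 8-sector aligned */
	printf("%d\n", bio_acceptable(8, 8192, 4096));   /* 1: accepted */
	printf("%d\n", bio_acceptable(3, 8192, 4096));   /* 0: misaligned start */
	printf("%d\n", bio_acceptable(8, 1024, 4096));   /* 0: partial sector */
	return 0;
}
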
fd7c092e MP |
3420 | static void crypt_status(struct dm_target *ti, status_type_t type, |
3421 | unsigned status_flags, char *result, unsigned maxlen) | |
1da177e4 | 3422 | { |
5ebaee6d | 3423 | struct crypt_config *cc = ti->private; |
fd7c092e | 3424 | unsigned i, sz = 0; |
f3396c58 | 3425 | int num_feature_args = 0; |
1da177e4 LT |
3426 | |
3427 | switch (type) { | |
3428 | case STATUSTYPE_INFO: | |
3429 | result[0] = '\0'; | |
3430 | break; | |
3431 | ||
3432 | case STATUSTYPE_TABLE: | |
7dbcd137 | 3433 | DMEMIT("%s ", cc->cipher_string); |
1da177e4 | 3434 | |
c538f6ec OK |
3435 | if (cc->key_size > 0) { |
3436 | if (cc->key_string) | |
3437 | DMEMIT(":%u:%s", cc->key_size, cc->key_string); | |
3438 | else | |
3439 | for (i = 0; i < cc->key_size; i++) | |
3440 | DMEMIT("%02x", cc->key[i]); | |
3441 | } else | |
fd7c092e | 3442 | DMEMIT("-"); |
1da177e4 | 3443 | |
4ee218cd AM |
3444 | DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, |
3445 | cc->dev->name, (unsigned long long)cc->start); | |
772ae5f5 | 3446 | |
f3396c58 MP |
3447 | num_feature_args += !!ti->num_discard_bios; |
3448 | num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); | |
0f5d8e6e | 3449 | num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); |
39d42fa9 IK |
3450 | num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags); |
3451 | num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); | |
ff3af92b | 3452 | num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); |
8f0009a2 | 3453 | num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); |
ef43aa38 MB |
3454 | if (cc->on_disk_tag_size) |
3455 | num_feature_args++; | |
f3396c58 MP |
3456 | if (num_feature_args) { |
3457 | DMEMIT(" %d", num_feature_args); | |
3458 | if (ti->num_discard_bios) | |
3459 | DMEMIT(" allow_discards"); | |
3460 | if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) | |
3461 | DMEMIT(" same_cpu_crypt"); | |
0f5d8e6e MP |
3462 | if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) |
3463 | DMEMIT(" submit_from_crypt_cpus"); | |
39d42fa9 IK |
3464 | if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) |
3465 | DMEMIT(" no_read_workqueue"); | |
3466 | if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) | |
3467 | DMEMIT(" no_write_workqueue"); | |
ef43aa38 MB |
3468 | if (cc->on_disk_tag_size) |
3469 | DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth); | |
8f0009a2 MB |
3470 | if (cc->sector_size != (1 << SECTOR_SHIFT)) |
3471 | DMEMIT(" sector_size:%d", cc->sector_size); | |
3472 | if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) | |
3473 | DMEMIT(" iv_large_sectors"); | |
f3396c58 | 3474 | } |
772ae5f5 | 3475 | |
1da177e4 LT |
3476 | break; |
3477 | } | |
1da177e4 LT |
3478 | } |
3479 | ||
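The STATUSTYPE_TABLE path above counts its optional arguments before emitting them, so the leading count always matches what follows. A sketch of that count-then-emit pattern with a DMEMIT-style bounded append; emit_arg() and the flag values are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* DMEMIT-style bounded append of one feature arg into result[] */
static void emit_arg(char *result, size_t maxlen, size_t *sz, const char *arg)
{
	*sz += snprintf(result + *sz, maxlen - *sz, " %s", arg);
}

int main(void)
{
	char result[128] = "";
	size_t sz = 0;
	bool allow_discards = true, same_cpu = false, no_write_wq = true;

	/* Pass 1: count, so the leading arg count is always consistent */
	int num_feature_args = allow_discards + same_cpu + no_write_wq;

	if (num_feature_args) {
		sz += snprintf(result + sz, sizeof(result) - sz, " %d",
			       num_feature_args);
		/* Pass 2: emit in the same order the flags were counted */
		if (allow_discards)
			emit_arg(result, sizeof(result), &sz, "allow_discards");
		if (same_cpu)
			emit_arg(result, sizeof(result), &sz, "same_cpu_crypt");
		if (no_write_wq)
			emit_arg(result, sizeof(result), &sz, "no_write_workqueue");
	}
	printf("%s\n", result);  /* " 2 allow_discards no_write_workqueue" */
	return 0;
}
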
e48d4bbf MB |
3480 | static void crypt_postsuspend(struct dm_target *ti) |
3481 | { | |
3482 | struct crypt_config *cc = ti->private; | |
3483 | ||
3484 | set_bit(DM_CRYPT_SUSPENDED, &cc->flags); | |
3485 | } | |
3486 | ||
3487 | static int crypt_preresume(struct dm_target *ti) | |
3488 | { | |
3489 | struct crypt_config *cc = ti->private; | |
3490 | ||
3491 | if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { | |
3492 | DMERR("aborting resume - crypt key is not set."); | |
3493 | return -EAGAIN; | |
3494 | } | |
3495 | ||
3496 | return 0; | |
3497 | } | |
3498 | ||
3499 | static void crypt_resume(struct dm_target *ti) | |
3500 | { | |
3501 | struct crypt_config *cc = ti->private; | |
3502 | ||
3503 | clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); | |
3504 | } | |
3505 | ||
3506 | /* Message interface | |
3507 | * key set <key> | |
3508 | * key wipe | |
3509 | */ | |
1eb5fa84 MS |
3510 | static int crypt_message(struct dm_target *ti, unsigned argc, char **argv, |
3511 | char *result, unsigned maxlen) | |
e48d4bbf MB |
3512 | { |
3513 | struct crypt_config *cc = ti->private; | |
c538f6ec | 3514 | int key_size, ret = -EINVAL; |
e48d4bbf MB |
3515 | |
3516 | if (argc < 2) | |
3517 | goto error; | |
3518 | ||
498f0103 | 3519 | if (!strcasecmp(argv[0], "key")) { |
e48d4bbf MB |
3520 | if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { |
3521 | DMWARN("not suspended during key manipulation."); | |
3522 | return -EINVAL; | |
3523 | } | |
498f0103 | 3524 | if (argc == 3 && !strcasecmp(argv[1], "set")) { |
c538f6ec OK |
3525 | /* The key size may not be changed. */ |
3526 | key_size = get_key_size(&argv[2]); | |
3527 | if (key_size < 0 || cc->key_size != key_size) { | |
3528 | memset(argv[2], '0', strlen(argv[2])); | |
3529 | return -EINVAL; | |
3530 | } | |
3531 | ||
542da317 MB |
3532 | ret = crypt_set_key(cc, argv[2]); |
3533 | if (ret) | |
3534 | return ret; | |
3535 | if (cc->iv_gen_ops && cc->iv_gen_ops->init) | |
3536 | ret = cc->iv_gen_ops->init(cc); | |
dc94902b OK |
3537 | /* wipe the kernel key payload copy */ |
3538 | if (cc->key_string) | |
3539 | memset(cc->key, 0, cc->key_size * sizeof(u8)); | |
542da317 MB |
3540 | return ret; |
3541 | } | |
4a52ffc7 | 3542 | if (argc == 2 && !strcasecmp(argv[1], "wipe")) |
e48d4bbf MB |
3543 | return crypt_wipe_key(cc); |
3544 | } | |
3545 | ||
3546 | error: | |
3547 | DMWARN("unrecognised message received."); | |
3548 | return -EINVAL; | |
3549 | } | |
3550 | ||
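The "key set" message path above insists that the new key decode to exactly the configured size before anything else is touched. A userspace sketch of that gate for plain hex keys; hex_key_size() is an illustrative stand-in for get_key_size(), which additionally handles :size:type:description keyring references:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Illustrative analogue: a plain hex key encodes strlen/2 bytes
 * and must be valid hex of even length. */
static int hex_key_size(const char *key)
{
	size_t len = strlen(key);

	if (!len || len % 2)
		return -1;
	for (size_t i = 0; i < len; i++)
		if (!isxdigit((unsigned char)key[i]))
			return -1;
	return (int)(len / 2);
}

int main(void)
{
	int configured_size = 32;   /* e.g. a 256-bit cbc(aes) key */
	char new_key[] = "00112233445566778899aabbccddeeff"
			 "00112233445566778899aabbccddeeff";

	if (hex_key_size(new_key) != configured_size)
		printf("rejected: key size may not change\n");
	else
		printf("accepted: %d-byte key\n", configured_size);

	/* Mirror the kernel's habit: scrub the cleartext copy afterwards */
	memset(new_key, '0', strlen(new_key));
	return 0;
}
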
af4874e0 MS |
3551 | static int crypt_iterate_devices(struct dm_target *ti, |
3552 | iterate_devices_callout_fn fn, void *data) | |
3553 | { | |
3554 | struct crypt_config *cc = ti->private; | |
3555 | ||
5dea271b | 3556 | return fn(ti, cc->dev, cc->start, ti->len, data); |
af4874e0 MS |
3557 | } |
3558 | ||
586b286b MS |
3559 | static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) |
3560 | { | |
8f0009a2 MB |
3561 | struct crypt_config *cc = ti->private; |
3562 | ||
586b286b MS |
3563 | /* |
3564 | * An unfortunate but necessary constraint: without it we could exceed
3565 | * the underlying device's max_segments limit, because
3566 | * crypt_alloc_buffer() may allocate pages for the encryption bio
3567 | * that are less physically contiguous than the original bio.
3568 | */ | |
3569 | limits->max_segment_size = PAGE_SIZE; | |
8f0009a2 | 3570 | |
bc9e9cf0 | 3571 | limits->logical_block_size = |
64611a15 | 3572 | max_t(unsigned, limits->logical_block_size, cc->sector_size); |
bc9e9cf0 MP |
3573 | limits->physical_block_size = |
3574 | max_t(unsigned, limits->physical_block_size, cc->sector_size); | |
3575 | limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); | |
586b286b MS |
3576 | } |
3577 | ||
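crypt_io_hints() only ever widens the stacked limits toward the crypt sector size; it never shrinks what lower layers already advertise. A small sketch of that max_t-style clamping over a reduced stand-in for struct queue_limits:

#include <stdio.h>

struct limits {
	unsigned int logical_block_size;
	unsigned int physical_block_size;
	unsigned int io_min;
};

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;   /* max_t(unsigned, a, b) analogue */
}

int main(void)
{
	/* Lower layer already reports 512/512/512; crypt uses 4K sectors */
	struct limits lim = { 512, 512, 512 };
	unsigned int sector_size = 4096;

	lim.logical_block_size = max_u(lim.logical_block_size, sector_size);
	lim.physical_block_size = max_u(lim.physical_block_size, sector_size);
	lim.io_min = max_u(lim.io_min, sector_size);

	printf("lbs=%u pbs=%u io_min=%u\n",
	       lim.logical_block_size, lim.physical_block_size, lim.io_min);
	return 0;
}
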
1da177e4 LT |
3578 | static struct target_type crypt_target = { |
3579 | .name = "crypt", | |
363880c4 | 3580 | .version = {1, 23, 0}, |
1da177e4 LT |
3581 | .module = THIS_MODULE, |
3582 | .ctr = crypt_ctr, | |
3583 | .dtr = crypt_dtr, | |
8e225f04 DLM |
3584 | .features = DM_TARGET_ZONED_HM, |
3585 | .report_zones = crypt_report_zones, | |
1da177e4 LT |
3586 | .map = crypt_map, |
3587 | .status = crypt_status, | |
e48d4bbf MB |
3588 | .postsuspend = crypt_postsuspend, |
3589 | .preresume = crypt_preresume, | |
3590 | .resume = crypt_resume, | |
3591 | .message = crypt_message, | |
af4874e0 | 3592 | .iterate_devices = crypt_iterate_devices, |
586b286b | 3593 | .io_hints = crypt_io_hints, |
1da177e4 LT |
3594 | }; |
3595 | ||
3596 | static int __init dm_crypt_init(void) | |
3597 | { | |
3598 | int r; | |
3599 | ||
1da177e4 | 3600 | r = dm_register_target(&crypt_target); |
94f5e024 | 3601 | if (r < 0) |
72d94861 | 3602 | DMERR("register failed %d", r); |
1da177e4 | 3603 | |
1da177e4 LT |
3604 | return r; |
3605 | } | |
3606 | ||
3607 | static void __exit dm_crypt_exit(void) | |
3608 | { | |
10d3bd09 | 3609 | dm_unregister_target(&crypt_target); |
1da177e4 LT |
3610 | } |
3611 | ||
3612 | module_init(dm_crypt_init); | |
3613 | module_exit(dm_crypt_exit); | |
3614 | ||
bf14299f | 3615 | MODULE_AUTHOR("Jana Saout <jana@saout.de>"); |
1da177e4 LT |
3616 | MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); |
3617 | MODULE_LICENSE("GPL"); |