Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (C) 2003 Christophe Saout <christophe@saout.de> | |
3 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> | |
3f1e9070 | 4 | * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. |
1da177e4 LT |
5 | * |
6 | * This file is released under the GPL. | |
7 | */ | |
8 | ||
43d69034 | 9 | #include <linux/completion.h> |
d1806f6a | 10 | #include <linux/err.h> |
1da177e4 LT |
11 | #include <linux/module.h> |
12 | #include <linux/init.h> | |
13 | #include <linux/kernel.h> | |
14 | #include <linux/bio.h> | |
15 | #include <linux/blkdev.h> | |
16 | #include <linux/mempool.h> | |
17 | #include <linux/slab.h> | |
18 | #include <linux/crypto.h> | |
19 | #include <linux/workqueue.h> | |
3fcfab16 | 20 | #include <linux/backing-dev.h> |
1da177e4 | 21 | #include <asm/atomic.h> |
378f058c | 22 | #include <linux/scatterlist.h> |
1da177e4 | 23 | #include <asm/page.h> |
48527fa7 | 24 | #include <asm/unaligned.h> |
1da177e4 | 25 | |
586e80e6 | 26 | #include <linux/device-mapper.h> |
1da177e4 | 27 | |
72d94861 | 28 | #define DM_MSG_PREFIX "crypt" |
e48d4bbf | 29 | #define MESG_STR(x) x, sizeof(x) |
1da177e4 | 30 | |
1da177e4 LT |
31 | /* |
32 | * context holding the current state of a multi-part conversion | |
33 | */ | |
34 | struct convert_context { | |
43d69034 | 35 | struct completion restart; |
1da177e4 LT |
36 | struct bio *bio_in; |
37 | struct bio *bio_out; | |
38 | unsigned int offset_in; | |
39 | unsigned int offset_out; | |
40 | unsigned int idx_in; | |
41 | unsigned int idx_out; | |
42 | sector_t sector; | |
43d69034 | 43 | atomic_t pending; |
1da177e4 LT |
44 | }; |
45 | ||
53017030 MB |
46 | /* |
47 | * per bio private data | |
48 | */ | |
49 | struct dm_crypt_io { | |
50 | struct dm_target *target; | |
51 | struct bio *base_bio; | |
52 | struct work_struct work; | |
53 | ||
54 | struct convert_context ctx; | |
55 | ||
56 | atomic_t pending; | |
57 | int error; | |
0c395b0f | 58 | sector_t sector; |
53017030 MB |
59 | }; |
60 | ||
01482b76 MB |
61 | struct dm_crypt_request { |
62 | struct scatterlist sg_in; | |
63 | struct scatterlist sg_out; | |
64 | }; | |
65 | ||
1da177e4 LT |
66 | struct crypt_config; |
67 | ||
68 | struct crypt_iv_operations { | |
69 | int (*ctr)(struct crypt_config *cc, struct dm_target *ti, | |
d469f841 | 70 | const char *opts); |
1da177e4 LT |
71 | void (*dtr)(struct crypt_config *cc); |
72 | const char *(*status)(struct crypt_config *cc); | |
73 | int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); | |
74 | }; | |
75 | ||
76 | /* | |
77 | * Crypt: maps a linear range of a block device | |
78 | * and encrypts / decrypts at the same time. | |
79 | */ | |
e48d4bbf | 80 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; |
1da177e4 LT |
81 | struct crypt_config { |
82 | struct dm_dev *dev; | |
83 | sector_t start; | |
84 | ||
85 | /* | |
ddd42edf MB |
86 | * pool for per bio private data, crypto requests and |
87 | * encryption requests/buffer pages |
1da177e4 LT |
88 | */ |
89 | mempool_t *io_pool; | |
ddd42edf | 90 | mempool_t *req_pool; |
1da177e4 | 91 | mempool_t *page_pool; |
6a24c718 | 92 | struct bio_set *bs; |
1da177e4 | 93 | |
cabf08e4 MB |
94 | struct workqueue_struct *io_queue; |
95 | struct workqueue_struct *crypt_queue; | |
3f1e9070 MB |
96 | wait_queue_head_t writeq; |
97 | ||
1da177e4 LT |
98 | /* |
99 | * crypto related data | |
100 | */ | |
101 | struct crypt_iv_operations *iv_gen_ops; | |
102 | char *iv_mode; | |
79066ad3 HX |
103 | union { |
104 | struct crypto_cipher *essiv_tfm; | |
105 | int benbi_shift; | |
106 | } iv_gen_private; | |
1da177e4 LT |
107 | sector_t iv_offset; |
108 | unsigned int iv_size; | |
109 | ||
ddd42edf MB |
110 | /* |
111 | * Layout of each crypto request: | |
112 | * | |
113 | * struct ablkcipher_request | |
114 | * context | |
115 | * padding | |
116 | * struct dm_crypt_request | |
117 | * padding | |
118 | * IV | |
119 | * | |
120 | * The padding is added so that dm_crypt_request and the IV are | |
121 | * correctly aligned. | |
122 | */ | |
123 | unsigned int dmreq_start; | |
124 | struct ablkcipher_request *req; | |
125 | ||
d1806f6a HX |
126 | char cipher[CRYPTO_MAX_ALG_NAME]; |
127 | char chainmode[CRYPTO_MAX_ALG_NAME]; | |
3a7f6c99 | 128 | struct crypto_ablkcipher *tfm; |
e48d4bbf | 129 | unsigned long flags; |
1da177e4 LT |
130 | unsigned int key_size; |
131 | u8 key[0]; | |
132 | }; | |
133 | ||
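The request layout described in the comment above is what `dmreq_start` encodes: the crypto API's per-request context comes first, then a padded `struct dm_crypt_request`, then the IV. Below is a minimal sketch of the implied pointer arithmetic, following the same calculation used later in crypt_convert_block() and crypt_ctr(); the helper names `example_dmreq` and `example_iv` are illustrative only and not part of the driver.

```c
/* Illustrative helpers (not in dm-crypt): locate the dm_crypt_request and
 * the IV that live behind an ablkcipher_request, given cc->dmreq_start. */
static struct dm_crypt_request *example_dmreq(struct crypt_config *cc,
					      struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static u8 *example_iv(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	/* The IV follows dm_crypt_request, rounded up to the cipher's
	 * alignment mask so the crypto driver can use it in place. */
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			   crypto_ablkcipher_alignmask(cc->tfm) + 1);
}
```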
6a24c718 | 134 | #define MIN_IOS 16 |
1da177e4 LT |
135 | #define MIN_POOL_PAGES 32 |
136 | #define MIN_BIO_PAGES 8 | |
137 | ||
e18b890b | 138 | static struct kmem_cache *_crypt_io_pool; |
1da177e4 | 139 | |
028867ac | 140 | static void clone_init(struct dm_crypt_io *, struct bio *); |
395b167c | 141 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); |
027581f3 | 142 | |
1da177e4 LT |
143 | /* |
144 | * Different IV generation algorithms: | |
145 | * | |
3c164bd8 | 146 | * plain: the initial vector is the 32-bit little-endian version of the sector |
3a4fa0a2 | 147 | * number, padded with zeros if necessary. |
1da177e4 | 148 | * |
3c164bd8 RS |
149 | * essiv: "encrypted sector|salt initial vector", the sector number is |
150 | * encrypted with the bulk cipher using a salt as key. The salt | |
151 | * should be derived from the bulk cipher's key via hashing. | |
1da177e4 | 152 | * |
48527fa7 RS |
153 | * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 |
154 | * (needed for LRW-32-AES and possible other narrow block modes) | |
155 | * | |
46b47730 LN |
156 | * null: the initial vector is always zero. Provides compatibility with |
157 | * obsolete loop_fish2 devices. Do not use for new devices. | |
158 | * | |
1da177e4 LT |
159 | * plumb: unimplemented, see: |
160 | * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 | |
161 | */ | |
162 | ||
163 | static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | |
164 | { | |
165 | memset(iv, 0, cc->iv_size); | |
166 | *(u32 *)iv = cpu_to_le32(sector & 0xffffffff); | |
167 | ||
168 | return 0; | |
169 | } | |
170 | ||
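As a concrete illustration of the plain generator (not part of the file, and assuming a cipher with `cc->iv_size == 16`, e.g. AES-CBC): the IV is the 32-bit little-endian sector number followed by zero padding.

```c
/* Illustrative only: plain IV for sector 300 (0x12c) with a 16-byte IV. */
u8 iv[16];
crypt_iv_plain_gen(cc, iv, 300);
/*
 * iv now holds the low 32 bits of the sector number, little-endian,
 * zero-padded to iv_size:
 *   2c 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 */
```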
171 | static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, | |
d469f841 | 172 | const char *opts) |
1da177e4 | 173 | { |
d1806f6a | 174 | struct crypto_cipher *essiv_tfm; |
35058687 HX |
175 | struct crypto_hash *hash_tfm; |
176 | struct hash_desc desc; | |
1da177e4 LT |
177 | struct scatterlist sg; |
178 | unsigned int saltsize; | |
179 | u8 *salt; | |
d1806f6a | 180 | int err; |
1da177e4 LT |
181 | |
182 | if (opts == NULL) { | |
72d94861 | 183 | ti->error = "Digest algorithm missing for ESSIV mode"; |
1da177e4 LT |
184 | return -EINVAL; |
185 | } | |
186 | ||
187 | /* Hash the cipher key with the given hash algorithm */ | |
35058687 HX |
188 | hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); |
189 | if (IS_ERR(hash_tfm)) { | |
72d94861 | 190 | ti->error = "Error initializing ESSIV hash"; |
35058687 | 191 | return PTR_ERR(hash_tfm); |
1da177e4 LT |
192 | } |
193 | ||
35058687 | 194 | saltsize = crypto_hash_digestsize(hash_tfm); |
1da177e4 LT |
195 | salt = kmalloc(saltsize, GFP_KERNEL); |
196 | if (salt == NULL) { | |
72d94861 | 197 | ti->error = "Error kmallocing salt storage in ESSIV"; |
35058687 | 198 | crypto_free_hash(hash_tfm); |
1da177e4 LT |
199 | return -ENOMEM; |
200 | } | |
201 | ||
68e3f5dd | 202 | sg_init_one(&sg, cc->key, cc->key_size); |
35058687 HX |
203 | desc.tfm = hash_tfm; |
204 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | |
205 | err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); | |
206 | crypto_free_hash(hash_tfm); | |
207 | ||
208 | if (err) { | |
209 | ti->error = "Error calculating hash in ESSIV"; | |
815f9e32 | 210 | kfree(salt); |
35058687 HX |
211 | return err; |
212 | } | |
1da177e4 LT |
213 | |
214 | /* Setup the essiv_tfm with the given salt */ | |
d1806f6a HX |
215 | essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); |
216 | if (IS_ERR(essiv_tfm)) { | |
72d94861 | 217 | ti->error = "Error allocating crypto tfm for ESSIV"; |
1da177e4 | 218 | kfree(salt); |
d1806f6a | 219 | return PTR_ERR(essiv_tfm); |
1da177e4 | 220 | } |
d1806f6a | 221 | if (crypto_cipher_blocksize(essiv_tfm) != |
3a7f6c99 | 222 | crypto_ablkcipher_ivsize(cc->tfm)) { |
72d94861 | 223 | ti->error = "Block size of ESSIV cipher does " |
d469f841 | 224 | "not match IV size of block cipher"; |
d1806f6a | 225 | crypto_free_cipher(essiv_tfm); |
1da177e4 LT |
226 | kfree(salt); |
227 | return -EINVAL; | |
228 | } | |
d1806f6a HX |
229 | err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); |
230 | if (err) { | |
72d94861 | 231 | ti->error = "Failed to set key for ESSIV cipher"; |
d1806f6a | 232 | crypto_free_cipher(essiv_tfm); |
1da177e4 | 233 | kfree(salt); |
d1806f6a | 234 | return err; |
1da177e4 LT |
235 | } |
236 | kfree(salt); | |
237 | ||
79066ad3 | 238 | cc->iv_gen_private.essiv_tfm = essiv_tfm; |
1da177e4 LT |
239 | return 0; |
240 | } | |
241 | ||
242 | static void crypt_iv_essiv_dtr(struct crypt_config *cc) | |
243 | { | |
79066ad3 HX |
244 | crypto_free_cipher(cc->iv_gen_private.essiv_tfm); |
245 | cc->iv_gen_private.essiv_tfm = NULL; | |
1da177e4 LT |
246 | } |
247 | ||
248 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | |
249 | { | |
1da177e4 LT |
250 | memset(iv, 0, cc->iv_size); |
251 | *(u64 *)iv = cpu_to_le64(sector); | |
79066ad3 | 252 | crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv); |
1da177e4 LT |
253 | return 0; |
254 | } | |
255 | ||
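Put as a formula, ESSIV yields IV(sector) = E_s(LE64(sector)), where the salt s = H(key) is the digest of the volume key under the hash named in the table (for example essiv:sha256), as computed by crypt_iv_essiv_ctr() above. Because the salt is derived from the key, the per-sector IVs cannot be predicted without knowing the key.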
48527fa7 RS |
256 | static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, |
257 | const char *opts) | |
258 | { | |
3a7f6c99 | 259 | unsigned bs = crypto_ablkcipher_blocksize(cc->tfm); |
f0d1b0b3 | 260 | int log = ilog2(bs); |
48527fa7 RS |
261 | |
262 | /* we need to calculate how far we must shift the sector count | |
263 | * to get the cipher block count, we use this shift in _gen */ | |
264 | ||
265 | if (1 << log != bs) { | |
266 | ti->error = "cypher blocksize is not a power of 2"; | |
267 | return -EINVAL; | |
268 | } | |
269 | ||
270 | if (log > 9) { | |
271 | ti->error = "cypher blocksize is > 512"; | |
272 | return -EINVAL; | |
273 | } | |
274 | ||
79066ad3 | 275 | cc->iv_gen_private.benbi_shift = 9 - log; |
48527fa7 RS |
276 | |
277 | return 0; | |
278 | } | |
279 | ||
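A worked example of the shift computed above (illustrative, not part of the file): with a 16-byte cipher block, as in LRW-AES, ilog2(16) = 4, so benbi_shift = 9 - 4 = 5. crypt_iv_benbi_gen() below then stores cpu_to_be64((sector << 5) + 1) in the last eight bytes of the IV; for sector 300 that is (300 << 5) + 1 = 9601, the 1-based count of 16-byte narrow blocks up to that sector.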
280 | static void crypt_iv_benbi_dtr(struct crypt_config *cc) | |
281 | { | |
48527fa7 RS |
282 | } |
283 | ||
284 | static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | |
285 | { | |
79066ad3 HX |
286 | __be64 val; |
287 | ||
48527fa7 | 288 | memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ |
79066ad3 HX |
289 | |
290 | val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1); | |
291 | put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); | |
48527fa7 | 292 | |
1da177e4 LT |
293 | return 0; |
294 | } | |
295 | ||
46b47730 LN |
296 | static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector) |
297 | { | |
298 | memset(iv, 0, cc->iv_size); | |
299 | ||
300 | return 0; | |
301 | } | |
302 | ||
1da177e4 LT |
303 | static struct crypt_iv_operations crypt_iv_plain_ops = { |
304 | .generator = crypt_iv_plain_gen | |
305 | }; | |
306 | ||
307 | static struct crypt_iv_operations crypt_iv_essiv_ops = { | |
308 | .ctr = crypt_iv_essiv_ctr, | |
309 | .dtr = crypt_iv_essiv_dtr, | |
310 | .generator = crypt_iv_essiv_gen | |
311 | }; | |
312 | ||
48527fa7 RS |
313 | static struct crypt_iv_operations crypt_iv_benbi_ops = { |
314 | .ctr = crypt_iv_benbi_ctr, | |
315 | .dtr = crypt_iv_benbi_dtr, | |
316 | .generator = crypt_iv_benbi_gen | |
317 | }; | |
1da177e4 | 318 | |
46b47730 LN |
319 | static struct crypt_iv_operations crypt_iv_null_ops = { |
320 | .generator = crypt_iv_null_gen | |
321 | }; | |
322 | ||
d469f841 MB |
323 | static void crypt_convert_init(struct crypt_config *cc, |
324 | struct convert_context *ctx, | |
325 | struct bio *bio_out, struct bio *bio_in, | |
fcd369da | 326 | sector_t sector) |
1da177e4 LT |
327 | { |
328 | ctx->bio_in = bio_in; | |
329 | ctx->bio_out = bio_out; | |
330 | ctx->offset_in = 0; | |
331 | ctx->offset_out = 0; | |
332 | ctx->idx_in = bio_in ? bio_in->bi_idx : 0; | |
333 | ctx->idx_out = bio_out ? bio_out->bi_idx : 0; | |
334 | ctx->sector = sector + cc->iv_offset; | |
43d69034 | 335 | init_completion(&ctx->restart); |
1da177e4 LT |
336 | } |
337 | ||
01482b76 | 338 | static int crypt_convert_block(struct crypt_config *cc, |
3a7f6c99 MB |
339 | struct convert_context *ctx, |
340 | struct ablkcipher_request *req) | |
01482b76 MB |
341 | { |
342 | struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); | |
343 | struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); | |
3a7f6c99 MB |
344 | struct dm_crypt_request *dmreq; |
345 | u8 *iv; | |
346 | int r = 0; | |
347 | ||
348 | dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start); | |
349 | iv = (u8 *)ALIGN((unsigned long)(dmreq + 1), | |
350 | crypto_ablkcipher_alignmask(cc->tfm) + 1); | |
01482b76 | 351 | |
3a7f6c99 MB |
352 | sg_init_table(&dmreq->sg_in, 1); |
353 | sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, | |
01482b76 MB |
354 | bv_in->bv_offset + ctx->offset_in); |
355 | ||
3a7f6c99 MB |
356 | sg_init_table(&dmreq->sg_out, 1); |
357 | sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, | |
01482b76 MB |
358 | bv_out->bv_offset + ctx->offset_out); |
359 | ||
360 | ctx->offset_in += 1 << SECTOR_SHIFT; | |
361 | if (ctx->offset_in >= bv_in->bv_len) { | |
362 | ctx->offset_in = 0; | |
363 | ctx->idx_in++; | |
364 | } | |
365 | ||
366 | ctx->offset_out += 1 << SECTOR_SHIFT; | |
367 | if (ctx->offset_out >= bv_out->bv_len) { | |
368 | ctx->offset_out = 0; | |
369 | ctx->idx_out++; | |
370 | } | |
371 | ||
3a7f6c99 MB |
372 | if (cc->iv_gen_ops) { |
373 | r = cc->iv_gen_ops->generator(cc, iv, ctx->sector); | |
374 | if (r < 0) | |
375 | return r; | |
376 | } | |
377 | ||
378 | ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, | |
379 | 1 << SECTOR_SHIFT, iv); | |
380 | ||
381 | if (bio_data_dir(ctx->bio_in) == WRITE) | |
382 | r = crypto_ablkcipher_encrypt(req); | |
383 | else | |
384 | r = crypto_ablkcipher_decrypt(req); | |
385 | ||
386 | return r; | |
01482b76 MB |
387 | } |
388 | ||
95497a96 MB |
389 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
390 | int error); | |
ddd42edf MB |
391 | static void crypt_alloc_req(struct crypt_config *cc, |
392 | struct convert_context *ctx) | |
393 | { | |
394 | if (!cc->req) | |
395 | cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); | |
95497a96 MB |
396 | ablkcipher_request_set_tfm(cc->req, cc->tfm); |
397 | ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG | | |
398 | CRYPTO_TFM_REQ_MAY_SLEEP, | |
399 | kcryptd_async_done, ctx); | |
ddd42edf MB |
400 | } |
401 | ||
1da177e4 LT |
402 | /* |
403 | * Encrypt / decrypt data from one bio to another one (can be the same one) | |
404 | */ | |
405 | static int crypt_convert(struct crypt_config *cc, | |
d469f841 | 406 | struct convert_context *ctx) |
1da177e4 | 407 | { |
3f1e9070 | 408 | int r; |
1da177e4 | 409 | |
c8081618 MB |
410 | atomic_set(&ctx->pending, 1); |
411 | ||
1da177e4 LT |
412 | while(ctx->idx_in < ctx->bio_in->bi_vcnt && |
413 | ctx->idx_out < ctx->bio_out->bi_vcnt) { | |
1da177e4 | 414 | |
3a7f6c99 MB |
415 | crypt_alloc_req(cc, ctx); |
416 | ||
3f1e9070 MB |
417 | atomic_inc(&ctx->pending); |
418 | ||
3a7f6c99 MB |
419 | r = crypt_convert_block(cc, ctx, cc->req); |
420 | ||
421 | switch (r) { | |
3f1e9070 | 422 | /* async */ |
3a7f6c99 MB |
423 | case -EBUSY: |
424 | wait_for_completion(&ctx->restart); | |
425 | INIT_COMPLETION(ctx->restart); | |
426 | /* fall through */ | |
427 | case -EINPROGRESS: | |
3a7f6c99 | 428 | cc->req = NULL; |
3f1e9070 MB |
429 | ctx->sector++; |
430 | continue; | |
431 | ||
432 | /* sync */ | |
3a7f6c99 | 433 | case 0: |
3f1e9070 | 434 | atomic_dec(&ctx->pending); |
3a7f6c99 | 435 | ctx->sector++; |
c7f1b204 | 436 | cond_resched(); |
3a7f6c99 | 437 | continue; |
3a7f6c99 | 438 | |
3f1e9070 MB |
439 | /* error */ |
440 | default: | |
441 | atomic_dec(&ctx->pending); | |
442 | return r; | |
443 | } | |
1da177e4 LT |
444 | } |
445 | ||
3f1e9070 | 446 | return 0; |
1da177e4 LT |
447 | } |
448 | ||
d469f841 MB |
449 | static void dm_crypt_bio_destructor(struct bio *bio) |
450 | { | |
028867ac | 451 | struct dm_crypt_io *io = bio->bi_private; |
6a24c718 MB |
452 | struct crypt_config *cc = io->target->private; |
453 | ||
454 | bio_free(bio, cc->bs); | |
d469f841 | 455 | } |
6a24c718 | 456 | |
1da177e4 LT |
457 | /* |
458 | * Generate a new unfragmented bio with the given size | |
459 | * This should never violate the device limitations | |
933f01d4 MB |
460 | * May return a smaller bio when running out of pages, indicated by |
461 | * *out_of_pages set to 1. | |
1da177e4 | 462 | */ |
933f01d4 MB |
463 | static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, |
464 | unsigned *out_of_pages) | |
1da177e4 | 465 | { |
027581f3 | 466 | struct crypt_config *cc = io->target->private; |
8b004457 | 467 | struct bio *clone; |
1da177e4 | 468 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
b4e3ca1a | 469 | gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; |
91e10625 MB |
470 | unsigned i, len; |
471 | struct page *page; | |
1da177e4 | 472 | |
2f9941b6 | 473 | clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); |
8b004457 | 474 | if (!clone) |
1da177e4 | 475 | return NULL; |
1da177e4 | 476 | |
027581f3 | 477 | clone_init(io, clone); |
933f01d4 | 478 | *out_of_pages = 0; |
6a24c718 | 479 | |
f97380bc | 480 | for (i = 0; i < nr_iovecs; i++) { |
91e10625 | 481 | page = mempool_alloc(cc->page_pool, gfp_mask); |
933f01d4 MB |
482 | if (!page) { |
483 | *out_of_pages = 1; | |
1da177e4 | 484 | break; |
933f01d4 | 485 | } |
1da177e4 LT |
486 | |
487 | /* | |
488 | * if additional pages cannot be allocated without waiting, | |
489 | * return a partially allocated bio, the caller will then try | |
490 | * to allocate additional bios while submitting this partial bio | |
491 | */ | |
f97380bc | 492 | if (i == (MIN_BIO_PAGES - 1)) |
1da177e4 LT |
493 | gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; |
494 | ||
91e10625 MB |
495 | len = (size > PAGE_SIZE) ? PAGE_SIZE : size; |
496 | ||
497 | if (!bio_add_page(clone, page, len, 0)) { | |
498 | mempool_free(page, cc->page_pool); | |
499 | break; | |
500 | } | |
1da177e4 | 501 | |
91e10625 | 502 | size -= len; |
1da177e4 LT |
503 | } |
504 | ||
8b004457 MB |
505 | if (!clone->bi_size) { |
506 | bio_put(clone); | |
1da177e4 LT |
507 | return NULL; |
508 | } | |
509 | ||
8b004457 | 510 | return clone; |
1da177e4 LT |
511 | } |
512 | ||
644bd2f0 | 513 | static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) |
1da177e4 | 514 | { |
644bd2f0 | 515 | unsigned int i; |
1da177e4 LT |
516 | struct bio_vec *bv; |
517 | ||
644bd2f0 | 518 | for (i = 0; i < clone->bi_vcnt; i++) { |
8b004457 | 519 | bv = bio_iovec_idx(clone, i); |
1da177e4 LT |
520 | BUG_ON(!bv->bv_page); |
521 | mempool_free(bv->bv_page, cc->page_pool); | |
522 | bv->bv_page = NULL; | |
523 | } | |
524 | } | |
525 | ||
dc440d1e MB |
526 | static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti, |
527 | struct bio *bio, sector_t sector) | |
528 | { | |
529 | struct crypt_config *cc = ti->private; | |
530 | struct dm_crypt_io *io; | |
531 | ||
532 | io = mempool_alloc(cc->io_pool, GFP_NOIO); | |
533 | io->target = ti; | |
534 | io->base_bio = bio; | |
535 | io->sector = sector; | |
536 | io->error = 0; | |
537 | atomic_set(&io->pending, 0); | |
538 | ||
539 | return io; | |
540 | } | |
541 | ||
3e1a8bdd MB |
542 | static void crypt_inc_pending(struct dm_crypt_io *io) |
543 | { | |
544 | atomic_inc(&io->pending); | |
545 | } | |
546 | ||
1da177e4 LT |
547 | /* |
548 | * One of the bios was finished. Check for completion of | |
549 | * the whole request and correctly clean up the buffer. | |
550 | */ | |
5742fd77 | 551 | static void crypt_dec_pending(struct dm_crypt_io *io) |
1da177e4 | 552 | { |
5742fd77 | 553 | struct crypt_config *cc = io->target->private; |
1da177e4 LT |
554 | |
555 | if (!atomic_dec_and_test(&io->pending)) | |
556 | return; | |
557 | ||
6712ecf8 | 558 | bio_endio(io->base_bio, io->error); |
1da177e4 LT |
559 | mempool_free(io, cc->io_pool); |
560 | } | |
561 | ||
562 | /* | |
cabf08e4 | 563 | * kcryptd/kcryptd_io: |
1da177e4 LT |
564 | * |
565 | * Needed because it would be very unwise to do decryption in an | |
23541d2d | 566 | * interrupt context. |
cabf08e4 MB |
567 | * |
568 | * kcryptd performs the actual encryption or decryption. | |
569 | * | |
570 | * kcryptd_io performs the IO submission. | |
571 | * | |
572 | * They must be separated as otherwise the final stages could be | |
573 | * starved by new requests which can block in the first stages due | |
574 | * to memory allocation. | |
1da177e4 | 575 | */ |
6712ecf8 | 576 | static void crypt_endio(struct bio *clone, int error) |
8b004457 | 577 | { |
028867ac | 578 | struct dm_crypt_io *io = clone->bi_private; |
8b004457 | 579 | struct crypt_config *cc = io->target->private; |
ee7a491e | 580 | unsigned rw = bio_data_dir(clone); |
8b004457 | 581 | |
adfe4770 MB |
582 | if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) |
583 | error = -EIO; | |
584 | ||
8b004457 | 585 | /* |
6712ecf8 | 586 | * free the processed pages |
8b004457 | 587 | */ |
ee7a491e | 588 | if (rw == WRITE) |
644bd2f0 | 589 | crypt_free_buffer_pages(cc, clone); |
8b004457 MB |
590 | |
591 | bio_put(clone); | |
8b004457 | 592 | |
ee7a491e MB |
593 | if (rw == READ && !error) { |
594 | kcryptd_queue_crypt(io); | |
595 | return; | |
596 | } | |
5742fd77 MB |
597 | |
598 | if (unlikely(error)) | |
599 | io->error = error; | |
600 | ||
601 | crypt_dec_pending(io); | |
8b004457 MB |
602 | } |
603 | ||
028867ac | 604 | static void clone_init(struct dm_crypt_io *io, struct bio *clone) |
8b004457 MB |
605 | { |
606 | struct crypt_config *cc = io->target->private; | |
607 | ||
608 | clone->bi_private = io; | |
609 | clone->bi_end_io = crypt_endio; | |
610 | clone->bi_bdev = cc->dev->bdev; | |
611 | clone->bi_rw = io->base_bio->bi_rw; | |
027581f3 | 612 | clone->bi_destructor = dm_crypt_bio_destructor; |
8b004457 MB |
613 | } |
614 | ||
4e4eef64 | 615 | static void kcryptd_io_read(struct dm_crypt_io *io) |
8b004457 MB |
616 | { |
617 | struct crypt_config *cc = io->target->private; | |
618 | struct bio *base_bio = io->base_bio; | |
619 | struct bio *clone; | |
93e605c2 | 620 | |
3e1a8bdd | 621 | crypt_inc_pending(io); |
8b004457 MB |
622 | |
623 | /* | |
624 | * The block layer might modify the bvec array, so always | |
625 | * copy the required bvecs because we need the original | |
626 | * one in order to decrypt the whole bio data *afterwards*. | |
627 | */ | |
6a24c718 | 628 | clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs); |
93e605c2 | 629 | if (unlikely(!clone)) { |
5742fd77 MB |
630 | io->error = -ENOMEM; |
631 | crypt_dec_pending(io); | |
23541d2d | 632 | return; |
93e605c2 | 633 | } |
8b004457 MB |
634 | |
635 | clone_init(io, clone); | |
636 | clone->bi_idx = 0; | |
637 | clone->bi_vcnt = bio_segments(base_bio); | |
638 | clone->bi_size = base_bio->bi_size; | |
0c395b0f | 639 | clone->bi_sector = cc->start + io->sector; |
8b004457 MB |
640 | memcpy(clone->bi_io_vec, bio_iovec(base_bio), |
641 | sizeof(struct bio_vec) * clone->bi_vcnt); | |
8b004457 | 642 | |
93e605c2 | 643 | generic_make_request(clone); |
8b004457 MB |
644 | } |
645 | ||
4e4eef64 MB |
646 | static void kcryptd_io_write(struct dm_crypt_io *io) |
647 | { | |
95497a96 | 648 | struct bio *clone = io->ctx.bio_out; |
3f1e9070 | 649 | struct crypt_config *cc = io->target->private; |
95497a96 MB |
650 | |
651 | generic_make_request(clone); | |
3f1e9070 | 652 | wake_up(&cc->writeq); |
4e4eef64 MB |
653 | } |
654 | ||
395b167c AK |
655 | static void kcryptd_io(struct work_struct *work) |
656 | { | |
657 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); | |
658 | ||
659 | if (bio_data_dir(io->base_bio) == READ) | |
660 | kcryptd_io_read(io); | |
661 | else | |
662 | kcryptd_io_write(io); | |
663 | } | |
664 | ||
665 | static void kcryptd_queue_io(struct dm_crypt_io *io) | |
666 | { | |
667 | struct crypt_config *cc = io->target->private; | |
668 | ||
669 | INIT_WORK(&io->work, kcryptd_io); | |
670 | queue_work(cc->io_queue, &io->work); | |
671 | } | |
672 | ||
95497a96 MB |
673 | static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, |
674 | int error, int async) | |
4e4eef64 | 675 | { |
dec1cedf MB |
676 | struct bio *clone = io->ctx.bio_out; |
677 | struct crypt_config *cc = io->target->private; | |
678 | ||
679 | if (unlikely(error < 0)) { | |
680 | crypt_free_buffer_pages(cc, clone); | |
681 | bio_put(clone); | |
682 | io->error = -EIO; | |
6c031f41 | 683 | crypt_dec_pending(io); |
dec1cedf MB |
684 | return; |
685 | } | |
686 | ||
687 | /* crypt_convert should have filled the clone bio */ | |
688 | BUG_ON(io->ctx.idx_out < clone->bi_vcnt); | |
689 | ||
690 | clone->bi_sector = cc->start + io->sector; | |
691 | io->sector += bio_sectors(clone); | |
899c95d3 | 692 | |
95497a96 MB |
693 | if (async) |
694 | kcryptd_queue_io(io); | |
1e37bb8e | 695 | else |
95497a96 | 696 | generic_make_request(clone); |
4e4eef64 MB |
697 | } |
698 | ||
fc5a5e9a | 699 | static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) |
8b004457 MB |
700 | { |
701 | struct crypt_config *cc = io->target->private; | |
8b004457 | 702 | struct bio *clone; |
c8081618 | 703 | int crypt_finished; |
933f01d4 | 704 | unsigned out_of_pages = 0; |
dec1cedf MB |
705 | unsigned remaining = io->base_bio->bi_size; |
706 | int r; | |
8b004457 | 707 | |
fc5a5e9a MB |
708 | /* |
709 | * Prevent io from disappearing until this function completes. | |
710 | */ | |
711 | crypt_inc_pending(io); | |
712 | crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector); | |
713 | ||
93e605c2 MB |
714 | /* |
715 | * The allocated buffers can be smaller than the whole bio, | |
716 | * so repeat the whole process until all the data can be handled. | |
717 | */ | |
718 | while (remaining) { | |
933f01d4 | 719 | clone = crypt_alloc_buffer(io, remaining, &out_of_pages); |
23541d2d | 720 | if (unlikely(!clone)) { |
5742fd77 | 721 | io->error = -ENOMEM; |
fc5a5e9a | 722 | break; |
23541d2d | 723 | } |
93e605c2 | 724 | |
53017030 MB |
725 | io->ctx.bio_out = clone; |
726 | io->ctx.idx_out = 0; | |
93e605c2 | 727 | |
dec1cedf | 728 | remaining -= clone->bi_size; |
93e605c2 | 729 | |
4e594098 | 730 | crypt_inc_pending(io); |
dec1cedf | 731 | r = crypt_convert(cc, &io->ctx); |
c8081618 | 732 | crypt_finished = atomic_dec_and_test(&io->ctx.pending); |
f97380bc | 733 | |
c8081618 MB |
734 | /* Encryption was already finished, submit io now */ |
735 | if (crypt_finished) { | |
3a7f6c99 | 736 | kcryptd_crypt_write_io_submit(io, r, 0); |
c8081618 MB |
737 | |
738 | /* | |
739 | * If there was an error, do not try next fragments. | |
740 | * For async, error is processed in async handler. | |
741 | */ | |
6c031f41 | 742 | if (unlikely(r < 0)) |
fc5a5e9a | 743 | break; |
4e594098 | 744 | } |
93e605c2 | 745 | |
933f01d4 MB |
746 | /* |
747 | * Out of memory -> run queues | |
748 | * But don't wait if split was due to the io size restriction | |
749 | */ | |
750 | if (unlikely(out_of_pages)) | |
98221eb7 | 751 | congestion_wait(WRITE, HZ/100); |
933f01d4 MB |
752 | |
753 | if (unlikely(remaining)) | |
754 | wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); | |
93e605c2 | 755 | } |
899c95d3 MB |
756 | |
757 | crypt_dec_pending(io); | |
84131db6 MB |
758 | } |
759 | ||
4e4eef64 | 760 | static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error) |
5742fd77 MB |
761 | { |
762 | if (unlikely(error < 0)) | |
763 | io->error = -EIO; | |
764 | ||
765 | crypt_dec_pending(io); | |
766 | } | |
767 | ||
4e4eef64 | 768 | static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) |
8b004457 MB |
769 | { |
770 | struct crypt_config *cc = io->target->private; | |
5742fd77 | 771 | int r = 0; |
1da177e4 | 772 | |
3e1a8bdd | 773 | crypt_inc_pending(io); |
3a7f6c99 | 774 | |
53017030 | 775 | crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, |
0c395b0f | 776 | io->sector); |
1da177e4 | 777 | |
5742fd77 MB |
778 | r = crypt_convert(cc, &io->ctx); |
779 | ||
3f1e9070 | 780 | if (atomic_dec_and_test(&io->ctx.pending)) |
3a7f6c99 MB |
781 | kcryptd_crypt_read_done(io, r); |
782 | ||
783 | crypt_dec_pending(io); | |
1da177e4 LT |
784 | } |
785 | ||
95497a96 MB |
786 | static void kcryptd_async_done(struct crypto_async_request *async_req, |
787 | int error) | |
788 | { | |
789 | struct convert_context *ctx = async_req->data; | |
790 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); | |
791 | struct crypt_config *cc = io->target->private; | |
792 | ||
793 | if (error == -EINPROGRESS) { | |
794 | complete(&ctx->restart); | |
795 | return; | |
796 | } | |
797 | ||
798 | mempool_free(ablkcipher_request_cast(async_req), cc->req_pool); | |
799 | ||
800 | if (!atomic_dec_and_test(&ctx->pending)) | |
801 | return; | |
802 | ||
803 | if (bio_data_dir(io->base_bio) == READ) | |
804 | kcryptd_crypt_read_done(io, error); | |
805 | else | |
806 | kcryptd_crypt_write_io_submit(io, error, 1); | |
807 | } | |
808 | ||
395b167c | 809 | static void kcryptd_crypt(struct work_struct *work) |
1da177e4 | 810 | { |
028867ac | 811 | struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); |
8b004457 | 812 | |
cabf08e4 | 813 | if (bio_data_dir(io->base_bio) == READ) |
395b167c | 814 | kcryptd_crypt_read_convert(io); |
4e4eef64 | 815 | else |
395b167c | 816 | kcryptd_crypt_write_convert(io); |
cabf08e4 MB |
817 | } |
818 | ||
395b167c | 819 | static void kcryptd_queue_crypt(struct dm_crypt_io *io) |
cabf08e4 | 820 | { |
395b167c | 821 | struct crypt_config *cc = io->target->private; |
cabf08e4 | 822 | |
395b167c AK |
823 | INIT_WORK(&io->work, kcryptd_crypt); |
824 | queue_work(cc->crypt_queue, &io->work); | |
1da177e4 LT |
825 | } |
826 | ||
827 | /* | |
828 | * Decode key from its hex representation | |
829 | */ | |
830 | static int crypt_decode_key(u8 *key, char *hex, unsigned int size) | |
831 | { | |
832 | char buffer[3]; | |
833 | char *endp; | |
834 | unsigned int i; | |
835 | ||
836 | buffer[2] = '\0'; | |
837 | ||
8b004457 | 838 | for (i = 0; i < size; i++) { |
1da177e4 LT |
839 | buffer[0] = *hex++; |
840 | buffer[1] = *hex++; | |
841 | ||
842 | key[i] = (u8)simple_strtoul(buffer, &endp, 16); | |
843 | ||
844 | if (endp != &buffer[2]) | |
845 | return -EINVAL; | |
846 | } | |
847 | ||
848 | if (*hex != '\0') | |
849 | return -EINVAL; | |
850 | ||
851 | return 0; | |
852 | } | |
853 | ||
854 | /* | |
855 | * Encode key into its hex representation | |
856 | */ | |
857 | static void crypt_encode_key(char *hex, u8 *key, unsigned int size) | |
858 | { | |
859 | unsigned int i; | |
860 | ||
8b004457 | 861 | for (i = 0; i < size; i++) { |
1da177e4 LT |
862 | sprintf(hex, "%02x", *key); |
863 | hex += 2; | |
864 | key++; | |
865 | } | |
866 | } | |
867 | ||
e48d4bbf MB |
868 | static int crypt_set_key(struct crypt_config *cc, char *key) |
869 | { | |
870 | unsigned key_size = strlen(key) >> 1; | |
871 | ||
872 | if (cc->key_size && cc->key_size != key_size) | |
873 | return -EINVAL; | |
874 | ||
875 | cc->key_size = key_size; /* initial settings */ | |
876 | ||
877 | if ((!key_size && strcmp(key, "-")) || | |
d469f841 | 878 | (key_size && crypt_decode_key(cc->key, key, key_size) < 0)) |
e48d4bbf MB |
879 | return -EINVAL; |
880 | ||
881 | set_bit(DM_CRYPT_KEY_VALID, &cc->flags); | |
882 | ||
883 | return 0; | |
884 | } | |
885 | ||
886 | static int crypt_wipe_key(struct crypt_config *cc) | |
887 | { | |
888 | clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); | |
889 | memset(&cc->key, 0, cc->key_size * sizeof(u8)); | |
890 | return 0; | |
891 | } | |
892 | ||
1da177e4 LT |
893 | /* |
894 | * Construct an encryption mapping: | |
895 | * <cipher> <key> <iv_offset> <dev_path> <start> | |
896 | */ | |
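For illustration only (not taken from this file), a complete dmsetup table line using these five arguments might read `0 409600 crypt aes-cbc-essiv:sha256 <key-in-hex> 0 /dev/sdb 0`: the leading `0 409600 crypt` is the generic device-mapper start/length/target prefix, and the rest maps onto <cipher> <key> <iv_offset> <dev_path> <start>.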
897 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
898 | { | |
899 | struct crypt_config *cc; | |
3a7f6c99 | 900 | struct crypto_ablkcipher *tfm; |
1da177e4 LT |
901 | char *tmp; |
902 | char *cipher; | |
903 | char *chainmode; | |
904 | char *ivmode; | |
905 | char *ivopts; | |
1da177e4 | 906 | unsigned int key_size; |
4ee218cd | 907 | unsigned long long tmpll; |
1da177e4 LT |
908 | |
909 | if (argc != 5) { | |
72d94861 | 910 | ti->error = "Not enough arguments"; |
1da177e4 LT |
911 | return -EINVAL; |
912 | } | |
913 | ||
914 | tmp = argv[0]; | |
915 | cipher = strsep(&tmp, "-"); | |
916 | chainmode = strsep(&tmp, "-"); | |
917 | ivopts = strsep(&tmp, "-"); | |
918 | ivmode = strsep(&ivopts, ":"); | |
919 | ||
920 | if (tmp) | |
72d94861 | 921 | DMWARN("Unexpected additional cipher options"); |
1da177e4 LT |
922 | |
923 | key_size = strlen(argv[1]) >> 1; | |
924 | ||
e48d4bbf | 925 | cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); |
1da177e4 LT |
926 | if (cc == NULL) { |
927 | ti->error = | |
72d94861 | 928 | "Cannot allocate transparent encryption context"; |
1da177e4 LT |
929 | return -ENOMEM; |
930 | } | |
931 | ||
e48d4bbf | 932 | if (crypt_set_key(cc, argv[1])) { |
72d94861 | 933 | ti->error = "Error decoding key"; |
636d5786 | 934 | goto bad_cipher; |
1da177e4 LT |
935 | } |
936 | ||
937 | /* Compatibility mode for old dm-crypt cipher strings */ | |
938 | if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) { | |
939 | chainmode = "cbc"; | |
940 | ivmode = "plain"; | |
941 | } | |
942 | ||
d1806f6a HX |
943 | if (strcmp(chainmode, "ecb") && !ivmode) { |
944 | ti->error = "This chaining mode requires an IV mechanism"; | |
636d5786 | 945 | goto bad_cipher; |
1da177e4 LT |
946 | } |
947 | ||
d469f841 MB |
948 | if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", |
949 | chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) { | |
d1806f6a | 950 | ti->error = "Chain mode + cipher name is too long"; |
636d5786 | 951 | goto bad_cipher; |
1da177e4 LT |
952 | } |
953 | ||
3a7f6c99 | 954 | tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0); |
d1806f6a | 955 | if (IS_ERR(tfm)) { |
72d94861 | 956 | ti->error = "Error allocating crypto tfm"; |
636d5786 | 957 | goto bad_cipher; |
1da177e4 | 958 | } |
1da177e4 | 959 | |
d1806f6a HX |
960 | strcpy(cc->cipher, cipher); |
961 | strcpy(cc->chainmode, chainmode); | |
1da177e4 LT |
962 | cc->tfm = tfm; |
963 | ||
964 | /* | |
48527fa7 | 965 | * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi". |
1da177e4 LT |
966 | * See comments at iv code |
967 | */ | |
968 | ||
969 | if (ivmode == NULL) | |
970 | cc->iv_gen_ops = NULL; | |
971 | else if (strcmp(ivmode, "plain") == 0) | |
972 | cc->iv_gen_ops = &crypt_iv_plain_ops; | |
973 | else if (strcmp(ivmode, "essiv") == 0) | |
974 | cc->iv_gen_ops = &crypt_iv_essiv_ops; | |
48527fa7 RS |
975 | else if (strcmp(ivmode, "benbi") == 0) |
976 | cc->iv_gen_ops = &crypt_iv_benbi_ops; | |
46b47730 LN |
977 | else if (strcmp(ivmode, "null") == 0) |
978 | cc->iv_gen_ops = &crypt_iv_null_ops; | |
1da177e4 | 979 | else { |
72d94861 | 980 | ti->error = "Invalid IV mode"; |
636d5786 | 981 | goto bad_ivmode; |
1da177e4 LT |
982 | } |
983 | ||
984 | if (cc->iv_gen_ops && cc->iv_gen_ops->ctr && | |
985 | cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) | |
636d5786 | 986 | goto bad_ivmode; |
1da177e4 | 987 | |
3a7f6c99 | 988 | cc->iv_size = crypto_ablkcipher_ivsize(tfm); |
d1806f6a | 989 | if (cc->iv_size) |
1da177e4 | 990 | /* at least a 64 bit sector number should fit in our buffer */ |
d1806f6a | 991 | cc->iv_size = max(cc->iv_size, |
d469f841 | 992 | (unsigned int)(sizeof(u64) / sizeof(u8))); |
1da177e4 | 993 | else { |
1da177e4 | 994 | if (cc->iv_gen_ops) { |
72d94861 | 995 | DMWARN("Selected cipher does not support IVs"); |
1da177e4 LT |
996 | if (cc->iv_gen_ops->dtr) |
997 | cc->iv_gen_ops->dtr(cc); | |
998 | cc->iv_gen_ops = NULL; | |
999 | } | |
1000 | } | |
1001 | ||
93d2341c | 1002 | cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); |
1da177e4 | 1003 | if (!cc->io_pool) { |
72d94861 | 1004 | ti->error = "Cannot allocate crypt io mempool"; |
636d5786 | 1005 | goto bad_slab_pool; |
1da177e4 LT |
1006 | } |
1007 | ||
ddd42edf | 1008 | cc->dmreq_start = sizeof(struct ablkcipher_request); |
3a7f6c99 | 1009 | cc->dmreq_start += crypto_ablkcipher_reqsize(tfm); |
ddd42edf | 1010 | cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); |
3a7f6c99 MB |
1011 | cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) & |
1012 | ~(crypto_tfm_ctx_alignment() - 1); | |
ddd42edf MB |
1013 | |
1014 | cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + | |
1015 | sizeof(struct dm_crypt_request) + cc->iv_size); | |
1016 | if (!cc->req_pool) { | |
1017 | ti->error = "Cannot allocate crypt request mempool"; | |
1018 | goto bad_req_pool; | |
1019 | } | |
1020 | cc->req = NULL; | |
1021 | ||
a19b27ce | 1022 | cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); |
1da177e4 | 1023 | if (!cc->page_pool) { |
72d94861 | 1024 | ti->error = "Cannot allocate page mempool"; |
636d5786 | 1025 | goto bad_page_pool; |
1da177e4 LT |
1026 | } |
1027 | ||
5972511b | 1028 | cc->bs = bioset_create(MIN_IOS, MIN_IOS); |
6a24c718 MB |
1029 | if (!cc->bs) { |
1030 | ti->error = "Cannot allocate crypt bioset"; | |
1031 | goto bad_bs; | |
1032 | } | |
1033 | ||
3a7f6c99 | 1034 | if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) { |
72d94861 | 1035 | ti->error = "Error setting key"; |
636d5786 | 1036 | goto bad_device; |
1da177e4 LT |
1037 | } |
1038 | ||
4ee218cd | 1039 | if (sscanf(argv[2], "%llu", &tmpll) != 1) { |
72d94861 | 1040 | ti->error = "Invalid iv_offset sector"; |
636d5786 | 1041 | goto bad_device; |
1da177e4 | 1042 | } |
4ee218cd | 1043 | cc->iv_offset = tmpll; |
1da177e4 | 1044 | |
4ee218cd | 1045 | if (sscanf(argv[4], "%llu", &tmpll) != 1) { |
72d94861 | 1046 | ti->error = "Invalid device sector"; |
636d5786 | 1047 | goto bad_device; |
1da177e4 | 1048 | } |
4ee218cd | 1049 | cc->start = tmpll; |
1da177e4 LT |
1050 | |
1051 | if (dm_get_device(ti, argv[3], cc->start, ti->len, | |
d469f841 | 1052 | dm_table_get_mode(ti->table), &cc->dev)) { |
72d94861 | 1053 | ti->error = "Device lookup failed"; |
636d5786 | 1054 | goto bad_device; |
1da177e4 LT |
1055 | } |
1056 | ||
1057 | if (ivmode && cc->iv_gen_ops) { | |
1058 | if (ivopts) | |
1059 | *(ivopts - 1) = ':'; | |
1060 | cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL); | |
1061 | if (!cc->iv_mode) { | |
72d94861 | 1062 | ti->error = "Error kmallocing iv_mode string"; |
636d5786 | 1063 | goto bad_ivmode_string; |
1da177e4 LT |
1064 | } |
1065 | strcpy(cc->iv_mode, ivmode); | |
1066 | } else | |
1067 | cc->iv_mode = NULL; | |
1068 | ||
cabf08e4 MB |
1069 | cc->io_queue = create_singlethread_workqueue("kcryptd_io"); |
1070 | if (!cc->io_queue) { | |
1071 | ti->error = "Couldn't create kcryptd io queue"; | |
1072 | goto bad_io_queue; | |
1073 | } | |
1074 | ||
1075 | cc->crypt_queue = create_singlethread_workqueue("kcryptd"); | |
1076 | if (!cc->crypt_queue) { | |
9934a8be | 1077 | ti->error = "Couldn't create kcryptd queue"; |
cabf08e4 | 1078 | goto bad_crypt_queue; |
9934a8be MB |
1079 | } |
1080 | ||
3f1e9070 | 1081 | init_waitqueue_head(&cc->writeq); |
1da177e4 LT |
1082 | ti->private = cc; |
1083 | return 0; | |
1084 | ||
cabf08e4 MB |
1085 | bad_crypt_queue: |
1086 | destroy_workqueue(cc->io_queue); | |
1087 | bad_io_queue: | |
9934a8be | 1088 | kfree(cc->iv_mode); |
636d5786 | 1089 | bad_ivmode_string: |
55b42c5a | 1090 | dm_put_device(ti, cc->dev); |
636d5786 | 1091 | bad_device: |
6a24c718 MB |
1092 | bioset_free(cc->bs); |
1093 | bad_bs: | |
1da177e4 | 1094 | mempool_destroy(cc->page_pool); |
636d5786 | 1095 | bad_page_pool: |
ddd42edf MB |
1096 | mempool_destroy(cc->req_pool); |
1097 | bad_req_pool: | |
1da177e4 | 1098 | mempool_destroy(cc->io_pool); |
636d5786 | 1099 | bad_slab_pool: |
1da177e4 LT |
1100 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
1101 | cc->iv_gen_ops->dtr(cc); | |
636d5786 | 1102 | bad_ivmode: |
3a7f6c99 | 1103 | crypto_free_ablkcipher(tfm); |
636d5786 | 1104 | bad_cipher: |
9d3520a3 SR |
1105 | /* Must zero key material before freeing */ |
1106 | memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); | |
1da177e4 LT |
1107 | kfree(cc); |
1108 | return -EINVAL; | |
1109 | } | |
1110 | ||
1111 | static void crypt_dtr(struct dm_target *ti) | |
1112 | { | |
1113 | struct crypt_config *cc = (struct crypt_config *) ti->private; | |
1114 | ||
cabf08e4 MB |
1115 | destroy_workqueue(cc->io_queue); |
1116 | destroy_workqueue(cc->crypt_queue); | |
80b16c19 | 1117 | |
ddd42edf MB |
1118 | if (cc->req) |
1119 | mempool_free(cc->req, cc->req_pool); | |
1120 | ||
6a24c718 | 1121 | bioset_free(cc->bs); |
1da177e4 | 1122 | mempool_destroy(cc->page_pool); |
ddd42edf | 1123 | mempool_destroy(cc->req_pool); |
1da177e4 LT |
1124 | mempool_destroy(cc->io_pool); |
1125 | ||
990a8baf | 1126 | kfree(cc->iv_mode); |
1da177e4 LT |
1127 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) |
1128 | cc->iv_gen_ops->dtr(cc); | |
3a7f6c99 | 1129 | crypto_free_ablkcipher(cc->tfm); |
1da177e4 | 1130 | dm_put_device(ti, cc->dev); |
9d3520a3 SR |
1131 | |
1132 | /* Must zero key material before freeing */ | |
1133 | memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); | |
1da177e4 LT |
1134 | kfree(cc); |
1135 | } | |
1136 | ||
1da177e4 LT |
1137 | static int crypt_map(struct dm_target *ti, struct bio *bio, |
1138 | union map_info *map_context) | |
1139 | { | |
028867ac | 1140 | struct dm_crypt_io *io; |
1da177e4 | 1141 | |
dc440d1e | 1142 | io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin); |
cabf08e4 MB |
1143 | |
1144 | if (bio_data_dir(io->base_bio) == READ) | |
1145 | kcryptd_queue_io(io); | |
1146 | else | |
1147 | kcryptd_queue_crypt(io); | |
1da177e4 | 1148 | |
d2a7ad29 | 1149 | return DM_MAPIO_SUBMITTED; |
1da177e4 LT |
1150 | } |
1151 | ||
1152 | static int crypt_status(struct dm_target *ti, status_type_t type, | |
1153 | char *result, unsigned int maxlen) | |
1154 | { | |
1155 | struct crypt_config *cc = (struct crypt_config *) ti->private; | |
1da177e4 LT |
1156 | unsigned int sz = 0; |
1157 | ||
1158 | switch (type) { | |
1159 | case STATUSTYPE_INFO: | |
1160 | result[0] = '\0'; | |
1161 | break; | |
1162 | ||
1163 | case STATUSTYPE_TABLE: | |
1da177e4 | 1164 | if (cc->iv_mode) |
37af6560 CS |
1165 | DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode, |
1166 | cc->iv_mode); | |
1da177e4 | 1167 | else |
37af6560 | 1168 | DMEMIT("%s-%s ", cc->cipher, cc->chainmode); |
1da177e4 LT |
1169 | |
1170 | if (cc->key_size > 0) { | |
1171 | if ((maxlen - sz) < ((cc->key_size << 1) + 1)) | |
1172 | return -ENOMEM; | |
1173 | ||
1174 | crypt_encode_key(result + sz, cc->key, cc->key_size); | |
1175 | sz += cc->key_size << 1; | |
1176 | } else { | |
1177 | if (sz >= maxlen) | |
1178 | return -ENOMEM; | |
1179 | result[sz++] = '-'; | |
1180 | } | |
1181 | ||
4ee218cd AM |
1182 | DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, |
1183 | cc->dev->name, (unsigned long long)cc->start); | |
1da177e4 LT |
1184 | break; |
1185 | } | |
1186 | return 0; | |
1187 | } | |
1188 | ||
e48d4bbf MB |
1189 | static void crypt_postsuspend(struct dm_target *ti) |
1190 | { | |
1191 | struct crypt_config *cc = ti->private; | |
1192 | ||
1193 | set_bit(DM_CRYPT_SUSPENDED, &cc->flags); | |
1194 | } | |
1195 | ||
1196 | static int crypt_preresume(struct dm_target *ti) | |
1197 | { | |
1198 | struct crypt_config *cc = ti->private; | |
1199 | ||
1200 | if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) { | |
1201 | DMERR("aborting resume - crypt key is not set."); | |
1202 | return -EAGAIN; | |
1203 | } | |
1204 | ||
1205 | return 0; | |
1206 | } | |
1207 | ||
1208 | static void crypt_resume(struct dm_target *ti) | |
1209 | { | |
1210 | struct crypt_config *cc = ti->private; | |
1211 | ||
1212 | clear_bit(DM_CRYPT_SUSPENDED, &cc->flags); | |
1213 | } | |
1214 | ||
1215 | /* Message interface | |
1216 | * key set <key> | |
1217 | * key wipe | |
1218 | */ | |
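For example (illustrative), `dmsetup message <device_name> 0 key wipe` clears the key of a suspended device, and `dmsetup message <device_name> 0 key set <new-key-in-hex>` loads a replacement before resuming.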
1219 | static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) | |
1220 | { | |
1221 | struct crypt_config *cc = ti->private; | |
1222 | ||
1223 | if (argc < 2) | |
1224 | goto error; | |
1225 | ||
1226 | if (!strnicmp(argv[0], MESG_STR("key"))) { | |
1227 | if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { | |
1228 | DMWARN("not suspended during key manipulation."); | |
1229 | return -EINVAL; | |
1230 | } | |
1231 | if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) | |
1232 | return crypt_set_key(cc, argv[2]); | |
1233 | if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) | |
1234 | return crypt_wipe_key(cc); | |
1235 | } | |
1236 | ||
1237 | error: | |
1238 | DMWARN("unrecognised message received."); | |
1239 | return -EINVAL; | |
1240 | } | |
1241 | ||
d41e26b9 MB |
1242 | static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm, |
1243 | struct bio_vec *biovec, int max_size) | |
1244 | { | |
1245 | struct crypt_config *cc = ti->private; | |
1246 | struct request_queue *q = bdev_get_queue(cc->dev->bdev); | |
1247 | ||
1248 | if (!q->merge_bvec_fn) | |
1249 | return max_size; | |
1250 | ||
1251 | bvm->bi_bdev = cc->dev->bdev; | |
1252 | bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin; | |
1253 | ||
1254 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | |
1255 | } | |
1256 | ||
1da177e4 LT |
1257 | static struct target_type crypt_target = { |
1258 | .name = "crypt", | |
d41e26b9 | 1259 | .version= {1, 6, 0}, |
1da177e4 LT |
1260 | .module = THIS_MODULE, |
1261 | .ctr = crypt_ctr, | |
1262 | .dtr = crypt_dtr, | |
1263 | .map = crypt_map, | |
1264 | .status = crypt_status, | |
e48d4bbf MB |
1265 | .postsuspend = crypt_postsuspend, |
1266 | .preresume = crypt_preresume, | |
1267 | .resume = crypt_resume, | |
1268 | .message = crypt_message, | |
d41e26b9 | 1269 | .merge = crypt_merge, |
1da177e4 LT |
1270 | }; |
1271 | ||
1272 | static int __init dm_crypt_init(void) | |
1273 | { | |
1274 | int r; | |
1275 | ||
028867ac | 1276 | _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0); |
1da177e4 LT |
1277 | if (!_crypt_io_pool) |
1278 | return -ENOMEM; | |
1279 | ||
1da177e4 LT |
1280 | r = dm_register_target(&crypt_target); |
1281 | if (r < 0) { | |
72d94861 | 1282 | DMERR("register failed %d", r); |
9934a8be | 1283 | kmem_cache_destroy(_crypt_io_pool); |
1da177e4 LT |
1284 | } |
1285 | ||
1da177e4 LT |
1286 | return r; |
1287 | } | |
1288 | ||
1289 | static void __exit dm_crypt_exit(void) | |
1290 | { | |
1291 | int r = dm_unregister_target(&crypt_target); | |
1292 | ||
1293 | if (r < 0) | |
72d94861 | 1294 | DMERR("unregister failed %d", r); |
1da177e4 | 1295 | |
1da177e4 LT |
1296 | kmem_cache_destroy(_crypt_io_pool); |
1297 | } | |
1298 | ||
1299 | module_init(dm_crypt_init); | |
1300 | module_exit(dm_crypt_exit); | |
1301 | ||
1302 | MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); | |
1303 | MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); | |
1304 | MODULE_LICENSE("GPL"); |