// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>

#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
                 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
                 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
                 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
        struct bio_crypt_ctx crypt_ctx;
        /*
         * Copy of the bvec_iter when this bio was submitted.
         * We only want to en/decrypt the part of the bio described by the
         * bvec_iter at submission time, because the bio might be split before
         * being resubmitted.
         */
        struct bvec_iter crypt_iter;
        union {
                struct {
                        struct work_struct work;
                        struct bio *bio;
                };
                struct {
                        void *bi_private_orig;
                        bio_end_io_t *bi_end_io_orig;
                };
        };
};

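/*
 * Slab cache and mempool backing the bio_fallback_crypt_ctx allocations made
 * on the read path in blk_crypto_fallback_bio_prep().
 */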
static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_keyslot {
        enum blk_crypto_mode_num crypto_mode;
        struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

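/*
 * "Evict" a keyslot: overwrite the key in the slot's tfm for its current mode
 * with the blank key, and mark the slot as holding no mode.
 */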
static void blk_crypto_evict_keyslot(unsigned int slot)
{
        struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
        enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
        int err;

        WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

        /* Clear the key in the skcipher */
        err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
                                     blk_crypto_modes[crypto_mode].keysize);
        WARN_ON(err);
        slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

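/*
 * Program @key into @slot: if the slot currently holds a key for a different
 * mode, evict that key first, then set the new key on the tfm for @key's mode.
 */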
static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
                                      const struct blk_crypto_key *key,
                                      unsigned int slot)
{
        struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
        const enum blk_crypto_mode_num crypto_mode =
                                                key->crypto_cfg.crypto_mode;
        int err;

        if (crypto_mode != slotp->crypto_mode &&
            slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
                blk_crypto_evict_keyslot(slot);

        slotp->crypto_mode = crypto_mode;
        err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
                                     key->size);
        if (err) {
                blk_crypto_evict_keyslot(slot);
                return err;
        }
        return 0;
}

static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
                                    const struct blk_crypto_key *key,
                                    unsigned int slot)
{
        blk_crypto_evict_keyslot(slot);
        return 0;
}

/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_key that was not supported by the device's inline encryption
 * hardware.
 */
static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
        .keyslot_program = blk_crypto_keyslot_program,
        .keyslot_evict = blk_crypto_keyslot_evict,
};

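/*
 * Endio for the bounce bio used on the write path: return the bounce pages to
 * their mempool, copy the I/O status over to the original bio, and end it.
 */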
static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
        struct bio *src_bio = enc_bio->bi_private;
        int i;

        for (i = 0; i < enc_bio->bi_vcnt; i++)
                mempool_free(enc_bio->bi_io_vec[i].bv_page,
                             blk_crypto_bounce_page_pool);

        src_bio->bi_status = enc_bio->bi_status;

        bio_put(enc_bio);
        bio_endio(src_bio);
}

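/*
 * Shallow-clone @bio_src: the clone's bvecs initially point at the source's
 * pages; blk_crypto_fallback_encrypt_bio() later swaps them for bounce pages.
 */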
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        struct bio *bio;

        bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
        if (!bio)
                return NULL;
        bio->bi_disk = bio_src->bi_disk;
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_ioprio = bio_src->bi_ioprio;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

        bio_for_each_segment(bv, bio_src, iter)
                bio->bi_io_vec[bio->bi_vcnt++] = bv;

        bio_clone_blkg_association(bio, bio_src);
        blkcg_bio_issue_init(bio);

        return bio;
}

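/*
 * Allocate an skcipher_request for the tfm currently programmed into @slot,
 * set up for synchronous completion via @wait. Returns false on allocation
 * failure.
 */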
static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
                                        struct skcipher_request **ciph_req_ret,
                                        struct crypto_wait *wait)
{
        struct skcipher_request *ciph_req;
        const struct blk_crypto_keyslot *slotp;
        int keyslot_idx = blk_ksm_get_slot_idx(slot);

        slotp = &blk_crypto_keyslots[keyslot_idx];
        ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
                                          GFP_NOIO);
        if (!ciph_req)
                return false;

        skcipher_request_set_callback(ciph_req,
                                      CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, wait);
        *ciph_req_ret = ciph_req;

        return true;
}

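/*
 * The bounce bio can hold at most BIO_MAX_PAGES bvecs. If *bio_ptr covers more
 * sectors than fit in that many single-page bvecs, split it: resubmit the
 * remainder and point *bio_ptr at the first part.
 */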
static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        unsigned int i = 0;
        unsigned int num_sectors = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter) {
                num_sectors += bv.bv_len >> SECTOR_SHIFT;
                if (++i == BIO_MAX_PAGES)
                        break;
        }
        if (num_sectors < bio_sectors(bio)) {
                struct bio *split_bio;

                split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
                if (!split_bio) {
                        bio->bi_status = BLK_STS_RESOURCE;
                        return false;
                }
                bio_chain(split_bio, bio);
                submit_bio_noacct(bio);
                *bio_ptr = split_bio;
        }

        return true;
}

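/*
 * The IV for a data unit, viewable either as the DUN (an array of __le64
 * words) or as the raw bytes passed to the skcipher.
 */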
union blk_crypto_iv {
        __le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                                 union blk_crypto_iv *iv)
{
        int i;

        for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
                iv->dun[i] = cpu_to_le64(dun[i]);
}

251 | ||
252 | /* | |
253 | * The crypto API fallback's encryption routine. | |
254 | * Allocate a bounce bio for encryption, encrypt the input bio using crypto API, | |
255 | * and replace *bio_ptr with the bounce bio. May split input bio if it's too | |
256 | * large. Returns true on success. Returns false and sets bio->bi_status on | |
257 | * error. | |
258 | */ | |
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
        struct bio *src_bio, *enc_bio;
        struct bio_crypt_ctx *bc;
        struct blk_ksm_keyslot *slot;
        int data_unit_size;
        struct skcipher_request *ciph_req = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        struct scatterlist src, dst;
        union blk_crypto_iv iv;
        unsigned int i, j;
        bool ret = false;
        blk_status_t blk_st;

        /* Split the bio if it's too big for a bounce bio of single-page bvecs */
        if (!blk_crypto_split_bio_if_needed(bio_ptr))
                return false;

        src_bio = *bio_ptr;
        bc = src_bio->bi_crypt_context;
        data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

        /* Allocate bounce bio for encryption */
        enc_bio = blk_crypto_clone_bio(src_bio);
        if (!enc_bio) {
                src_bio->bi_status = BLK_STS_RESOURCE;
                return false;
        }

        /*
         * Use the crypto API fallback keyslot manager to get a crypto_skcipher
         * for the algorithm and key specified for this bio.
         */
        blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
        if (blk_st != BLK_STS_OK) {
                src_bio->bi_status = blk_st;
                goto out_put_enc_bio;
        }

        /* and then allocate an skcipher_request for it */
        if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
                src_bio->bi_status = BLK_STS_RESOURCE;
                goto out_release_keyslot;
        }

        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&src, 1);
        sg_init_table(&dst, 1);

        skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
                                   iv.bytes);

        /* Encrypt each page in the bounce bio */
        for (i = 0; i < enc_bio->bi_vcnt; i++) {
                struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
                struct page *plaintext_page = enc_bvec->bv_page;
                struct page *ciphertext_page =
                        mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

                enc_bvec->bv_page = ciphertext_page;

                if (!ciphertext_page) {
                        src_bio->bi_status = BLK_STS_RESOURCE;
                        goto out_free_bounce_pages;
                }

                sg_set_page(&src, plaintext_page, data_unit_size,
                            enc_bvec->bv_offset);
                sg_set_page(&dst, ciphertext_page, data_unit_size,
                            enc_bvec->bv_offset);

                /* Encrypt each data unit in this page */
                for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
                                            &wait)) {
                                i++;
                                src_bio->bi_status = BLK_STS_IOERR;
                                goto out_free_bounce_pages;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        src.offset += data_unit_size;
                        dst.offset += data_unit_size;
                }
        }

        enc_bio->bi_private = src_bio;
        enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
        *bio_ptr = enc_bio;
        ret = true;

        enc_bio = NULL;
        goto out_free_ciph_req;

out_free_bounce_pages:
        while (i > 0)
                mempool_free(enc_bio->bi_io_vec[--i].bv_page,
                             blk_crypto_bounce_page_pool);
out_free_ciph_req:
        skcipher_request_free(ciph_req);
out_release_keyslot:
        blk_ksm_put_slot(slot);
out_put_enc_bio:
        if (enc_bio)
                bio_put(enc_bio);

        return ret;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts the input bio in place, then calls bio_endio() on it.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
        struct bio_fallback_crypt_ctx *f_ctx =
                container_of(work, struct bio_fallback_crypt_ctx, work);
        struct bio *bio = f_ctx->bio;
        struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
        struct blk_ksm_keyslot *slot;
        struct skcipher_request *ciph_req = NULL;
        DECLARE_CRYPTO_WAIT(wait);
        u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
        union blk_crypto_iv iv;
        struct scatterlist sg;
        struct bio_vec bv;
        struct bvec_iter iter;
        const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
        unsigned int i;
        blk_status_t blk_st;

        /*
         * Use the crypto API fallback keyslot manager to get a crypto_skcipher
         * for the algorithm and key specified for this bio.
         */
        blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
        if (blk_st != BLK_STS_OK) {
                bio->bi_status = blk_st;
                goto out_no_keyslot;
        }

        /* and then allocate an skcipher_request for it */
        if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
                bio->bi_status = BLK_STS_RESOURCE;
                goto out;
        }

        memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
        sg_init_table(&sg, 1);
        skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
                                   iv.bytes);

        /* Decrypt each segment in the bio */
        __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
                struct page *page = bv.bv_page;

                sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

                /* Decrypt each data unit in the segment */
                for (i = 0; i < bv.bv_len; i += data_unit_size) {
                        blk_crypto_dun_to_iv(curr_dun, &iv);
                        if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
                                            &wait)) {
                                bio->bi_status = BLK_STS_IOERR;
                                goto out;
                        }
                        bio_crypt_dun_increment(curr_dun, 1);
                        sg.offset += data_unit_size;
                }
        }

out:
        skcipher_request_free(ciph_req);
        blk_ksm_put_slot(slot);
out_no_keyslot:
        mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
        bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption on a
 * workqueue, since this function is called from atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
        struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

        bio->bi_private = f_ctx->bi_private_orig;
        bio->bi_end_io = f_ctx->bi_end_io_orig;

        /* If there was an IO error, don't queue the bio for decryption. */
        if (bio->bi_status) {
                mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
                bio_endio(bio);
                return;
        }

        INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
        f_ctx->bio = bio;
        queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If the bio is doing a WRITE operation, this splits the bio into two parts if
 * it's too big (see blk_crypto_split_bio_if_needed()). It then allocates a
 * bounce bio for the first part, encrypts it, and updates *bio_ptr to point to
 * the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio
 * (i.e. as if no encryption context was ever specified) for the purposes of
 * the rest of the stack except for blk-integrity (blk-integrity and blk-crypto
 * are not currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
        struct bio *bio = *bio_ptr;
        struct bio_crypt_ctx *bc = bio->bi_crypt_context;
        struct bio_fallback_crypt_ctx *f_ctx;

        if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
                /* User didn't call blk_crypto_start_using_key() first */
                bio->bi_status = BLK_STS_IOERR;
                return false;
        }

        if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
                                          &bc->bc_key->crypto_cfg)) {
                bio->bi_status = BLK_STS_NOTSUPP;
                return false;
        }

        if (bio_data_dir(bio) == WRITE)
                return blk_crypto_fallback_encrypt_bio(bio_ptr);

        /*
         * bio READ case: Set up an f_ctx in the bio's bi_private and set the
         * bi_end_io appropriately to trigger decryption when the bio is ended.
         */
        f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
        f_ctx->crypt_ctx = *bc;
        f_ctx->crypt_iter = bio->bi_iter;
        f_ctx->bi_private_orig = bio->bi_private;
        f_ctx->bi_end_io_orig = bio->bi_end_io;
        bio->bi_private = (void *)f_ctx;
        bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
        bio_crypt_free_ctx(bio);

        return true;
}

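/*
 * Evict @key from the fallback's keyslot manager, clearing it from any keyslot
 * it occupies.
 */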
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
        return blk_ksm_evict_key(&blk_crypto_ksm, key);
}

static bool blk_crypto_fallback_inited;
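/*
 * Lazily set up the fallback's global state (keyslot manager, workqueue,
 * keyslots, and mempools). Called with tfms_init_lock held; runs only once.
 */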
static int blk_crypto_fallback_init(void)
{
        int i;
        int err;

        if (blk_crypto_fallback_inited)
                return 0;

        prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

        err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
        if (err)
                goto out;
        err = -ENOMEM;

        blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
        blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

        /* All blk-crypto modes have a crypto API fallback. */
        for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
                blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
        blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

        blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
                                        WQ_UNBOUND | WQ_HIGHPRI |
                                        WQ_MEM_RECLAIM, num_online_cpus());
        if (!blk_crypto_wq)
                goto fail_free_ksm;

        blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
                                      sizeof(blk_crypto_keyslots[0]),
                                      GFP_KERNEL);
        if (!blk_crypto_keyslots)
                goto fail_free_wq;

        blk_crypto_bounce_page_pool =
                mempool_create_page_pool(num_prealloc_bounce_pg, 0);
        if (!blk_crypto_bounce_page_pool)
                goto fail_free_keyslots;

        bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
        if (!bio_fallback_crypt_ctx_cache)
                goto fail_free_bounce_page_pool;

        bio_fallback_crypt_ctx_pool =
                mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
                                         bio_fallback_crypt_ctx_cache);
        if (!bio_fallback_crypt_ctx_pool)
                goto fail_free_crypt_ctx_cache;

        blk_crypto_fallback_inited = true;

        return 0;
fail_free_crypt_ctx_cache:
        kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
        mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
        kfree(blk_crypto_keyslots);
fail_free_wq:
        destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
        blk_ksm_destroy(&blk_crypto_ksm);
out:
        return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
        const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
        struct blk_crypto_keyslot *slotp;
        unsigned int i;
        int err = 0;

        /*
         * Fast path
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before we try to access them.
         */
        if (likely(smp_load_acquire(&tfms_inited[mode_num])))
                return 0;

        mutex_lock(&tfms_init_lock);
        if (tfms_inited[mode_num])
                goto out;

        err = blk_crypto_fallback_init();
        if (err)
                goto out;

        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
                if (IS_ERR(slotp->tfms[mode_num])) {
                        err = PTR_ERR(slotp->tfms[mode_num]);
                        if (err == -ENOENT) {
                                pr_warn_once("Missing crypto API support for \"%s\"\n",
                                             cipher_str);
                                err = -ENOPKG;
                        }
                        slotp->tfms[mode_num] = NULL;
                        goto out_free_tfms;
                }

                crypto_skcipher_set_flags(slotp->tfms[mode_num],
                                          CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
        }

        /*
         * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
         * for each i are visible before we set tfms_inited[mode_num].
         */
        smp_store_release(&tfms_inited[mode_num], true);
        goto out;

out_free_tfms:
        for (i = 0; i < blk_crypto_num_keyslots; i++) {
                slotp = &blk_crypto_keyslots[i];
                crypto_free_skcipher(slotp->tfms[mode_num]);
                slotp->tfms[mode_num] = NULL;
        }
out:
        mutex_unlock(&tfms_init_lock);
        return err;
}