// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

#include "blk-cgroup.h"
#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio as described by the
	 * bvec_iter upon submission because bio might be split before being
	 * resubmitted.
	 */
	struct bvec_iter crypt_iter;
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_fallback_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_crypto_profile *blk_crypto_fallback_profile;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

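/*
 * "Evict" a fallback keyslot by overwriting its key with blank_key and
 * marking the slot as holding no valid crypto mode.
 */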
static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

static int
blk_crypto_fallback_keyslot_program(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
						key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_fallback_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_fallback_evict_keyslot(slot);
		return err;
	}
	return 0;
}

static int blk_crypto_fallback_keyslot_evict(struct blk_crypto_profile *profile,
					     const struct blk_crypto_key *key,
					     unsigned int slot)
{
	blk_crypto_fallback_evict_keyslot(slot);
	return 0;
}

static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
	.keyslot_program	= blk_crypto_fallback_keyslot_program,
	.keyslot_evict		= blk_crypto_fallback_keyslot_evict,
};

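/*
 * Completion handler for the bounce bio on the write path: free the bounce
 * pages, copy the I/O status to the source bio, and end the source bio.
 */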
static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_uninit(enc_bio);
	kfree(enc_bio);
	bio_endio(src_bio);
}

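/*
 * Create a bounce bio that mirrors @bio_src's bvecs and relevant fields.
 * The clone initially references the source's pages; on the write path each
 * page is swapped for a bounce page as it is encrypted.
 */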
static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
{
	unsigned int nr_segs = bio_segments(bio_src);
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_kmalloc(nr_segs, GFP_NOIO);
	if (!bio)
		return NULL;
	bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,
		 bio_src->bi_opf);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);

	return bio;
}

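/*
 * Allocate an skcipher_request on the keyslot's tfm for its currently
 * programmed mode, set up for synchronous use via crypto_wait_req().
 */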
static bool
blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
				     struct skcipher_request **ciph_req_ret,
				     struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_fallback_keyslot *slotp;
	int keyslot_idx = blk_crypto_keyslot_index(slot);

	slotp = &blk_crypto_keyslots[keyslot_idx];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;

	return true;
}

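/*
 * The bounce bio can hold at most BIO_MAX_VECS segments, so if the source bio
 * has more, split off a front part that fits and resubmit the remainder.
 * On success, *bio_ptr is the part that will be encrypted now.
 */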
static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_VECS)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
				      &crypto_bio_split);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}

	return true;
}

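/*
 * The IV is the data unit number (DUN) in little-endian form; the union lets
 * the DUN be filled in as 64-bit words and passed to the crypto API as bytes.
 */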
union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the crypto
 * API, and replace *bio_ptr with the bounce bio. May split the input bio if
 * it's too large. Returns true on success. Returns false and sets
 * bio->bi_status on error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_crypto_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it's too big for a single-page bvec */
	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_fallback_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
	 * this bio's algorithm and key.
	 */
	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
					bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
				i++;
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;
	ret = true;

	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_crypto_put_keyslot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_uninit(enc_bio);
	kfree(enc_bio);
	return ret;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_crypto_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
	 * this bio's algorithm and key.
	 */
	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
					bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_crypto_put_keyslot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If bio is doing a WRITE operation, this splits the bio into two parts if it's
 * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a
 * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
 * the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio (i.e.
 * as if no encryption context was ever specified) for the purposes of the rest
 * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
 * currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
					&bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up a f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}

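/*
 * Evict a key from the fallback's keyslot profile so that no fallback keyslot
 * still holds it.
 */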
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
}

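/*
 * Lazily set up the fallback's global resources on first use of a mode:
 * the split bio_set, the crypto profile and its keyslots, the decryption
 * workqueue, and the bounce page / crypt context mempools.
 */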
static bool blk_crypto_fallback_inited;
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;

	if (blk_crypto_fallback_inited)
		return 0;

	get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	err = bioset_init(&crypto_bio_split, 64, 0, 0);
	if (err)
		goto out;

	/* Dynamic allocation is needed because of lockdep_register_key(). */
	blk_crypto_fallback_profile =
		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
	if (!blk_crypto_fallback_profile) {
		err = -ENOMEM;
		goto fail_free_bioset;
	}

	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
				      blk_crypto_num_keyslots);
	if (err)
		goto fail_free_profile;
	err = -ENOMEM;

	blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
	blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
	blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_destroy_profile;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
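	/* Unwind the successful allocations above, in reverse order. */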
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
fail_free_profile:
	kfree(blk_crypto_fallback_profile);
fail_free_bioset:
	bioset_exit(&crypto_bio_split);
out:
	return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_fallback_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}