// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.name = "AES-256-XTS",
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.name = "AES-128-CBC-ESSIV",
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.name = "Adiantum",
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
	[BLK_ENCRYPTION_MODE_SM4_XTS] = {
		.name = "SM4-XTS",
		.cipher_str = "xts(sm4)",
		.keysize = 32,
		.ivsize = 16,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
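
/*
 * Hedged usage sketch (added for illustration, not part of the original file):
 * how an upper-layer caller might attach an encryption context to a bio before
 * submitting it. The helper name and the single-limb DUN choice are
 * hypothetical; the key is assumed to have already been initialized with
 * blk_crypto_init_key() and registered with blk_crypto_start_using_key().
 * GFP_NOIO contains __GFP_DIRECT_RECLAIM, so the mempool_alloc() above
 * cannot fail.
 */
static void __maybe_unused example_attach_crypt_ctx(struct bio *bio,
					const struct blk_crypto_key *key,
					u64 first_dun)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_dun };

	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
}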

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
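
/*
 * Worked example (added for illustration, not in the original file): with
 * BLK_CRYPTO_DUN_ARRAY_SIZE == 4, incrementing dun = { U64_MAX, 5, 0, 0 } by
 * inc = 3 overflows limb 0 to 2 and carries 1 into limb 1, giving
 * { 2, 6, 0, 0 }.
 */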

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
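
/*
 * Worked example (added for illustration, not in the original file): with
 * 4096-byte data units (data_unit_size_bits == 12), a bio covering 8192 bytes
 * whose bc_dun is { 10, 0, ... } is contiguous with a following bio whose DUN
 * is { 12, 0, ... }, since 8192 >> 12 == 2 and 10 + 2 == 12.
 */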

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}
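
/*
 * Example (added for illustration, not in the original file): with a 4096-byte
 * data unit size, a 512-byte segment, or one starting at offset 2048 within
 * its page, fails this check, since en/decryption operates on whole data
 * units.
 */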

blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}

void __blk_crypto_rq_put_keyslot(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	rq->crypt_keyslot = NULL;
}

void __blk_crypto_free_request(struct request *rq)
{
	/* The keyslot, if one was needed, should have been released earlier. */
	if (WARN_ON_ONCE(rq->crypt_keyslot))
		__blk_crypto_rq_put_keyslot(rq);

	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	rq->crypt_ctx = NULL;
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_crypto_config_supported_natively(bio->bi_bdev,
						 &bc_key->crypto_cfg))
		return true;
	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure. The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
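
/*
 * Hedged usage sketch (added for illustration, not part of the original file):
 * how a caller might initialize a blk_crypto_key for AES-256-XTS with 8-byte
 * DUNs and 4096-byte data units. The function name and the raw key length of
 * 64 bytes (the AES-256-XTS keysize from blk_crypto_modes[]) are stated here
 * only as an example.
 */
static int __maybe_unused example_init_key(struct blk_crypto_key *blk_key,
					   const u8 raw_key[64])
{
	return blk_crypto_init_key(blk_key, raw_key,
				   BLK_ENCRYPTION_MODE_AES_256_XTS,
				   /* dun_bytes */ 8,
				   /* data_unit_size */ 4096);
}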

bool blk_crypto_config_supported_natively(struct block_device *bdev,
					  const struct blk_crypto_config *cfg)
{
	return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,
					  cfg);
}

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * block_device it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct block_device *bdev,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       blk_crypto_config_supported_natively(bdev, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @bdev: block device to operate on
 * @key: A key to use on the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(struct block_device *bdev,
			       const struct blk_crypto_key *key)
{
	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
 * blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
 * @bdev: a block_device on which I/O using the key may have been done
 * @key: the key to evict
 *
 * For a given block_device, this function removes the given blk_crypto_key from
 * the keyslot management structures and evicts it from any underlying hardware
 * keyslot(s) or blk-crypto-fallback keyslot it may have been programmed into.
 *
 * Upper layers must call this before freeing the blk_crypto_key. It must be
 * called for every block_device the key may have been used on. The key must no
 * longer be in use by any I/O when this function is called.
 *
 * Context: May sleep.
 */
void blk_crypto_evict_key(struct block_device *bdev,
			  const struct blk_crypto_key *key)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int err;

	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		err = __blk_crypto_evict_key(q->crypto_profile, key);
	else
		err = blk_crypto_fallback_evict_key(key);
	/*
	 * An error can only occur here if the key failed to be evicted from a
	 * keyslot (due to a hardware or driver issue) or is allegedly still in
	 * use by I/O (due to a kernel bug). Even in these cases, the key is
	 * still unlinked from the keyslot management structures, and the caller
	 * is allowed and expected to free it right away. There's nothing
	 * callers can do to handle errors, so just log them and return void.
	 */
	if (err)
		pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);