// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for a detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.name = "AES-256-XTS",
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.name = "AES-128-CBC-ESSIV",
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.name = "Adiantum",
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
	[BLK_ENCRYPTION_MODE_SM4_XTS] = {
		.name = "SM4-XTS",
		.cipher_str = "xts(sm4)",
		.keysize = 32,
		.ivsize = 16,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

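/*
 * Set an encryption context on @bio: record @key and the starting DUN, and
 * allocate the bio_crypt_ctx from the mempool.  The caller must pass a
 * gfp_mask containing __GFP_DIRECT_RECLAIM so that the allocation cannot
 * fail.
 */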
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

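/*
 * Copy @src's encryption context to @dst, allocating a new bio_crypt_ctx with
 * @gfp_mask.  Returns -ENOMEM if the allocation fails.
 */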
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
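/*
 * For example, with BLK_CRYPTO_DUN_ARRAY_SIZE == 4, incrementing
 * dun = { U64_MAX, 0, 0, 0 } by 1 overflows the first limb and carries into
 * the second, giving { 0, 1, 0, 0 }.
 */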
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}

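/*
 * Advance the DUN in @bio's encryption context by the number of data units
 * covered by the first @bytes of the bio, e.g. after the bio has been split
 * or partially completed.
 */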
void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
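/*
 * For instance, with a 4096-byte data unit size, a context whose DUN starts
 * at 8 and which covers 8192 bytes (2 data units) is contiguous with a
 * following context whose DUN is 10.
 */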
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}

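/*
 * Program the request's key into a keyslot of the underlying device's crypto
 * profile, recording the keyslot in rq->crypt_keyslot.
 */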
blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_crypto_config_supported_natively(bio->bi_bdev,
						 &bc_key->crypto_cfg))
		return true;
	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

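/*
 * Copy the encryption context of @bio into @rq, allocating rq->crypt_ctx with
 * @gfp_mask if the request does not already have one.  Returns -ENOMEM if the
 * allocation fails.
 */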
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure. The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}

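/*
 * Illustrative sketch (not part of the original file): how an upper layer such
 * as a filesystem might set up inline encryption with the API above.  The
 * 64-byte raw key, the 8-byte DUN size, the 4096-byte data unit size, and the
 * helper name example_setup_inline_crypt() are assumptions for this example
 * only.
 *
 *	static int example_setup_inline_crypt(struct block_device *bdev,
 *					      const u8 raw_key[64],
 *					      struct blk_crypto_key *key)
 *	{
 *		int err;
 *
 *		err = blk_crypto_init_key(key, raw_key,
 *					  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *					  8, 4096);
 *		if (err)
 *			return err;
 *		return blk_crypto_start_using_key(bdev, key);
 *	}
 *
 * A bio encrypted with this key would then be given its context via
 * bio_crypt_set_ctx(bio, key, dun, GFP_NOIO) before submission.
 */
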
bool blk_crypto_config_supported_natively(struct block_device *bdev,
					  const struct blk_crypto_config *cfg)
{
	return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,
					  cfg);
}

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * block_device it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct block_device *bdev,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       blk_crypto_config_supported_natively(bdev, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @bdev: block device to operate on
 * @key: A key to use on the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(struct block_device *bdev,
			       const struct blk_crypto_key *key)
{
	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @bdev: The block_device whose associated inline encryption hardware this key
 *	  might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into. The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if the key wasn't in any keyslot; -errno on error.
 */
int blk_crypto_evict_key(struct block_device *bdev,
			 const struct blk_crypto_key *key)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		return __blk_crypto_evict_key(q->crypto_profile, key);

	/*
	 * If the block_device didn't support the key, then blk-crypto-fallback
	 * may have been used, so try to evict the key from blk-crypto-fallback.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);