/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blk-mq.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *name; /* name of this mode, shown in sysfs */
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int security_strength; /* security strength in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

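/*
 * The table of supported modes lives in blk-crypto.c, indexed by
 * enum blk_crypto_mode_num.  An entry looks roughly like this (illustrative
 * sketch; see the definition in blk-crypto.c for the authoritative values):
 *
 *	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
 *		.name			= "AES-256-XTS",
 *		.cipher_str		= "xts(aes)",
 *		.keysize		= 64,
 *		.security_strength	= 32,
 *		.ivsize			= 16,
 *	},
 */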
extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

int blk_crypto_sysfs_register(struct gendisk *disk);

void blk_crypto_sysfs_unregister(struct gendisk *disk);

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

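/*
 * In the merge helpers below, two I/Os are considered crypto-mergeable when
 * either neither has an encryption context, or both have one with the same
 * key and the second context's data unit number (DUN) continues contiguously
 * from the end of the first I/O.  The "bytes" argument passed to
 * bio_crypt_ctx_mergeable() is the length of the first I/O, from which the
 * expected next DUN is computed.
 */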
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return rq->crypt_keyslot;
}

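/*
 * The declarations below are implemented in blk-crypto-profile.c.  Getting a
 * keyslot takes a reference on (and, if necessary, programs) a keyslot for
 * the key in the given crypto profile; putting it drops that reference.
 */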
blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr);

void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);

int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key);

bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg);

int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
		     void __user *argp);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline int blk_crypto_sysfs_register(struct gendisk *disk)
{
	return 0;
}

static inline void blk_crypto_sysfs_unregister(struct gendisk *disk)
{
}

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
	return false;
}

static inline int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
				   void __user *argp)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

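/*
 * The helpers below follow a common pattern: an inline wrapper first checks
 * whether the bio or request actually uses inline encryption and only then
 * calls the out-of-line __-prefixed implementation, so unencrypted I/O pays
 * just a pointer test.
 */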
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

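/*
 * When a bio is merged at the front of a request, the bio's starting DUN
 * becomes the request's starting DUN, so it is copied into the request's
 * crypto context.
 */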
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

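/*
 * Acquire a keyslot for an encrypted request's key before the request is
 * processed; unencrypted requests trivially succeed with BLK_STS_OK.
 */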
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_rq_get_keyslot(rq);
	return BLK_STS_OK;
}

void __blk_crypto_rq_put_keyslot(struct request *rq);
static inline void blk_crypto_rq_put_keyslot(struct request *rq)
{
	if (blk_crypto_rq_has_keyslot(rq))
		__blk_crypto_rq_put_keyslot(rq);
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible
 *	   if @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

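/*
 * blk-crypto-fallback: when CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK is enabled,
 * bios that target devices without inline encryption support can instead be
 * en/decrypted in software via the kernel crypto API (hence cipher_str in
 * struct blk_crypto_mode above).  With the fallback disabled, such bios fail
 * with -ENOPKG / BLK_STS_NOTSUPP, as the stubs below show.
 */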
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */