/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	unsigned int keysize; /* key size in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

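/*
 * Increment the multi-word data unit number @dun by @inc data units,
 * propagating the carry between the u64 words.
 */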
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

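/*
 * Check whether @bio's crypt context is consistent with @rq's, i.e. whether
 * the bio could be merged into the request at all as far as inline
 * encryption is concerned.
 */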
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

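/*
 * Check whether @bc2 can directly follow @bc1_bytes worth of data covered by
 * @bc1: both contexts must use the same key, and @bc2's DUN must continue
 * where @bc1's data ends.
 */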
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

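/*
 * Back merge: @bio is appended to @req's data, so @req's crypt context
 * (covering blk_rq_bytes(req)) must be continued by @bio's.
 */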
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

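/*
 * Front merge: @bio is prepended to @req's data, so @bio's crypt context
 * (covering bio->bi_iter.bi_size bytes) must be continued by @req's.
 */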
static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

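/*
 * Request merge: @next is appended to @req, so @req's crypt context must be
 * continued by @next's.
 */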
static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

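/*
 * Stubs for when inline encryption is disabled: crypt contexts never
 * restrict merging, and no request is ever encrypted.
 */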
static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

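/*
 * The helpers below share a pattern: an out-of-line __-prefixed function
 * (defined in blk-crypto.c) does the real work, and an inline wrapper keeps
 * the common no-crypto case free of a function call.
 */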
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

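/*
 * On a front merge the bio becomes the new front of the request, so the
 * request's starting DUN must be taken over from the bio's crypt context.
 */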
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

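/*
 * Prepare a bio with a crypt context for submission. Returns false if the
 * bio was failed (with bio->bi_status set), in which case the caller must
 * not process it further.
 */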
bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

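/*
 * Allocate the resources an encrypted request needs, notably a keyslot for
 * its key in the device's keyslot manager.
 */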
blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

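/*
 * Release the resources held by an encrypted request, including its keyslot
 * reference and crypt context.
 */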
void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

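/*
 * Initialize @rq's crypt context from @bio's, allocating with @gfp_mask.
 */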
void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			      gfp_t gfp_mask);
static inline void blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					  gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		__blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *				      into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */