// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>

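/*
 * ECB helper: walk the request and dispatch each chunk to the widest
 * available batch routine.  The gctx->funcs[] array is expected to be
 * ordered from the largest num_blocks to the smallest (ending with a
 * one-block routine), so the loop falls through to progressively smaller
 * batches until less than one block remains.
 */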
int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
                        struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                const u8 *src = walk.src.virt.addr;
                u8 *dst = walk.dst.virt.addr;
                unsigned int func_bytes;
                unsigned int i;

                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             &walk, fpu_enabled, nbytes);
                for (i = 0; i < gctx->num_funcs; i++) {
                        func_bytes = bsize * gctx->funcs[i].num_blocks;

                        if (nbytes < func_bytes)
                                continue;

                        /* Process multi-block batch */
                        do {
                                gctx->funcs[i].fn_u.ecb(ctx, dst, src);
                                src += func_bytes;
                                dst += func_bytes;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                break;
                }
                err = skcipher_walk_done(&walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);

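/*
 * CBC encryption cannot be parallelised (each block depends on the previous
 * ciphertext block), so a single one-block function pointer is passed in
 * rather than a batch table.
 */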
int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
                                struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                const u128 *src = (u128 *)walk.src.virt.addr;
                u128 *dst = (u128 *)walk.dst.virt.addr;
                u128 *iv = (u128 *)walk.iv;

                do {
                        u128_xor(dst, src, iv);
                        fn(ctx, (u8 *)dst, (u8 *)dst);
                        iv = dst;
                        src++;
                        dst++;
                        nbytes -= bsize;
                } while (nbytes >= bsize);

                *(u128 *)walk.iv = *iv;
                err = skcipher_walk_done(&walk, nbytes);
        }
        return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);

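/*
 * CBC decryption is parallelisable: every plaintext block only needs the
 * matching ciphertext block and its predecessor, so the batched (SIMD)
 * routines from gctx->funcs[] can be used here.
 */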
int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
                                struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                const u128 *src = walk.src.virt.addr;
                u128 *dst = walk.dst.virt.addr;
                unsigned int func_bytes, num_blocks;
                unsigned int i;
                u128 last_iv;

                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             &walk, fpu_enabled, nbytes);
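                /*
                 * Walk backwards from the last block so that, for in-place
                 * requests, each ciphertext block is still intact when it
                 * is needed as the chaining value of the following block.
                 */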
                /* Start of the last block. */
                src += nbytes / bsize - 1;
                dst += nbytes / bsize - 1;

                last_iv = *src;

                for (i = 0; i < gctx->num_funcs; i++) {
                        num_blocks = gctx->funcs[i].num_blocks;
                        func_bytes = bsize * num_blocks;

                        if (nbytes < func_bytes)
                                continue;

                        /* Process multi-block batch */
                        do {
                                src -= num_blocks - 1;
                                dst -= num_blocks - 1;

                                gctx->funcs[i].fn_u.cbc(ctx, dst, src);

                                nbytes -= func_bytes;
                                if (nbytes < bsize)
                                        goto done;

                                u128_xor(dst, dst, --src);
                                dst--;
                        } while (nbytes >= func_bytes);
                }
done:
                u128_xor(dst, dst, (u128 *)walk.iv);
                *(u128 *)walk.iv = last_iv;
                err = skcipher_walk_done(&walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);

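/*
 * CTR mode: keystream blocks are independent, so batched routines can be
 * used.  The counter is kept in little-endian form (le128) internally and
 * converted back to the big-endian IV format expected by the skcipher API
 * around each walk step.
 */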
int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
                        struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) >= bsize) {
                const u128 *src = walk.src.virt.addr;
                u128 *dst = walk.dst.virt.addr;
                unsigned int func_bytes, num_blocks;
                unsigned int i;
                le128 ctrblk;

                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             &walk, fpu_enabled, nbytes);

                be128_to_le128(&ctrblk, (be128 *)walk.iv);

                for (i = 0; i < gctx->num_funcs; i++) {
                        num_blocks = gctx->funcs[i].num_blocks;
                        func_bytes = bsize * num_blocks;

                        if (nbytes < func_bytes)
                                continue;

                        /* Process multi-block batch */
                        do {
                                gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
                                src += num_blocks;
                                dst += num_blocks;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                break;
                }

                le128_to_be128((be128 *)walk.iv, &ctrblk);
                err = skcipher_walk_done(&walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);

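        /*
         * Handle a final partial block: CTR is a stream mode, so the last
         * few bytes are produced by encrypting one counter block into a
         * temporary buffer and copying out only the bytes that are needed.
         */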
        if (nbytes) {
                le128 ctrblk;
                u128 tmp;

                be128_to_le128(&ctrblk, (be128 *)walk.iv);
                memcpy(&tmp, walk.src.virt.addr, nbytes);
                gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
                                                          &ctrblk);
                memcpy(walk.dst.virt.addr, &tmp, nbytes);
                le128_to_be128((be128 *)walk.iv, &ctrblk);

                err = skcipher_walk_done(&walk, 0);
        }

        return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);

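/*
 * Core XTS loop: processes as many full blocks of the current walk chunk as
 * possible using the batched routines and returns the number of bytes left
 * over for the caller to report to skcipher_walk_done().
 */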
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
                                          void *ctx,
                                          struct skcipher_walk *walk)
{
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes = walk->nbytes;
        u128 *src = walk->src.virt.addr;
        u128 *dst = walk->dst.virt.addr;
        unsigned int num_blocks, func_bytes;
        unsigned int i;

        /* Process multi-block batch */
        for (i = 0; i < gctx->num_funcs; i++) {
                num_blocks = gctx->funcs[i].num_blocks;
                func_bytes = bsize * num_blocks;

                if (nbytes >= func_bytes) {
                        do {
                                gctx->funcs[i].fn_u.xts(ctx, dst, src,
                                                        walk->iv);

                                src += num_blocks;
                                dst += num_blocks;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                goto done;
                }
        }

done:
        return nbytes;
}

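/*
 * XTS request handler.  The tweak for the first block is computed by
 * encrypting the IV with tweak_fn/tweak_ctx (the second XTS key); the data
 * itself is then processed with crypt_ctx.  Requests whose length is not a
 * multiple of the block size are finished with ciphertext stealing below.
 */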
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
                        struct skcipher_request *req,
                        common_glue_func_t tweak_fn, void *tweak_ctx,
                        void *crypt_ctx, bool decrypt)
{
        const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
        const unsigned int bsize = 128 / 8;
        struct skcipher_request subreq;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes, tail;
        int err;

        if (req->cryptlen < XTS_BLOCK_SIZE)
                return -EINVAL;

        if (unlikely(cts)) {
                struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

                tail = req->cryptlen % XTS_BLOCK_SIZE + XTS_BLOCK_SIZE;

                skcipher_request_set_tfm(&subreq, tfm);
                skcipher_request_set_callback(&subreq,
                                              crypto_skcipher_get_flags(tfm),
                                              NULL, NULL);
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           req->cryptlen - tail, req->iv);
                req = &subreq;
        }

        err = skcipher_walk_virt(&walk, req, false);
        nbytes = walk.nbytes;
        if (err)
                return err;

        /* set minimum length to bsize, for tweak_fn */
        fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                     &walk, fpu_enabled,
                                     nbytes < bsize ? bsize : nbytes);

        /* calculate first value of T */
        tweak_fn(tweak_ctx, walk.iv, walk.iv);

        while (nbytes) {
                nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

                err = skcipher_walk_done(&walk, nbytes);
                nbytes = walk.nbytes;
        }

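        /*
         * Ciphertext stealing: process the final full block together with
         * the trailing partial block.  For decryption the order of the last
         * two tweaks is swapped, so the next tweak is derived up front with
         * gf128mul_x_ble() and the original IV is kept as the final tweak.
         */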
        if (unlikely(cts)) {
                u8 *next_tweak, *final_tweak = req->iv;
                struct scatterlist *src, *dst;
                struct scatterlist s[2], d[2];
                le128 b[2];

                dst = src = scatterwalk_ffwd(s, req->src, req->cryptlen);
                if (req->dst != req->src)
                        dst = scatterwalk_ffwd(d, req->dst, req->cryptlen);

                if (decrypt) {
                        next_tweak = memcpy(b, req->iv, XTS_BLOCK_SIZE);
                        gf128mul_x_ble(b, b);
                } else {
                        next_tweak = req->iv;
                }

                skcipher_request_set_crypt(&subreq, src, dst, XTS_BLOCK_SIZE,
                                           next_tweak);

                err = skcipher_walk_virt(&walk, req, false) ?:
                      skcipher_walk_done(&walk,
                                __glue_xts_req_128bit(gctx, crypt_ctx, &walk));
                if (err)
                        goto out;

                scatterwalk_map_and_copy(b, dst, 0, XTS_BLOCK_SIZE, 0);
                memcpy(b + 1, b, tail - XTS_BLOCK_SIZE);
                scatterwalk_map_and_copy(b, src, XTS_BLOCK_SIZE,
                                         tail - XTS_BLOCK_SIZE, 0);
                scatterwalk_map_and_copy(b, dst, 0, tail, 1);

                skcipher_request_set_crypt(&subreq, dst, dst, XTS_BLOCK_SIZE,
                                           final_tweak);

                err = skcipher_walk_virt(&walk, req, false) ?:
                      skcipher_walk_done(&walk,
                                __glue_xts_req_128bit(gctx, crypt_ctx, &walk));
        }

out:
        glue_fpu_end(fpu_enabled);

        return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);

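/*
 * Process a single XTS block with tweak *iv and advance the tweak in place
 * (multiplication by x in GF(2^128), little-endian block representation).
 */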
void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
                               common_glue_func_t fn)
{
        le128 ivblk = *iv;

        /* generate next IV */
        gf128mul_x_ble(iv, &ivblk);

        /* CC <- T xor C */
        u128_xor(dst, src, (u128 *)&ivblk);

        /* PP <- D(Key2,CC) */
        fn(ctx, (u8 *)dst, (u8 *)dst);

        /* P <- T xor PP */
        u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");