/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BUFFER_SIZE 128u

#define LRW_BLOCK_SIZE 16

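/*
 * Illustrative note (not from the original source): LRW_BLOCK_SIZE is fixed
 * at 16 because the tweak arithmetic below works in GF(2^128), and create()
 * rejects ciphers with any other block size. LRW_BUFFER_SIZE caps the tweak
 * scratch buffer embedded in struct rctx (see init_crypt()).
 */
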
struct priv {
	struct crypto_skcipher *child;

	/*
	 * Optimizes multiplying a random value (non-incrementing, as at
	 * the start of a new sector) with key2. We could also have used
	 * 4k optimization tables, or no optimization at all; in the
	 * latter case we would have to store key2 here.
	 */
	struct gf128mul_64k *table;

	/*
	 * stores:
	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
	 * needed for optimized multiplication of incrementing values
	 * with key2
	 */
	be128 mulinc[128];
};

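/*
 * Illustrative note (not from the original source): with T(i) = key2 * I(i)
 * in GF(2^128), consecutive tweaks satisfy
 *
 *	T(i+1) = T(i) ^ key2*(I(i) ^ I(i+1))
 *
 * and I ^ (I+1) always has the form { 0,...,0,1,...,1 }, so stepping the
 * tweak by one block only costs an XOR with one of the 128 precomputed
 * mulinc[] entries instead of a full multiplication.
 */
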
struct rctx {
	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];

	be128 t;

	be128 *ext;

	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;

	unsigned int left;

	struct skcipher_request subreq;
};

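/*
 * Illustrative note (not from the original source): setbit128_bbe() sets
 * coefficient 'bit' of a 128-bit big-endian (bbe) value. __set_bit() counts
 * bits from the LSB of the native word, so the XOR below remaps the index:
 * per 64-bit long on big-endian hosts, per byte on little-endian ones.
 */
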
static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			 ), b);
}

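/*
 * Illustrative note (not from the original source): the supplied key is the
 * child cipher key (Key1) followed by the 16-byte tweak key (Key2); for
 * "lrw(aes)" with AES-128 that is 32 bytes in total. setkey() feeds the
 * first part to the child cipher and builds the multiplication tables from
 * the trailing LRW_BLOCK_SIZE bytes.
 */
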
static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}

/*
 * Returns the number of trailing '1' bits in the words of the counter, which
 * is represented by 4 32-bit words, arranged from least to most significant.
 * At the same time, increments the counter by one.
 *
 * For example:
 *
 * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
 * int i = next_index(counter);
 * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
 */
static int next_index(u32 *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz(counter[i]++);

		counter[i] = 0;
		res += 32;
	}

	/*
	 * If we get here, then res == 128 and we are incrementing the counter
	 * from all ones to all zeros. This means we must return index 127, i.e.
	 * the one corresponding to key2*{ 1,...,1 }.
	 */
	return 127;
}

static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wdst;

		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;

	if (err || !rctx->left)
		goto out;

	rctx->dst = rctx->dstbuf;

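	/*
	 * Illustrative note (not from the original source): more data remains,
	 * so re-point rctx->dst at the position where this walk stopped; the
	 * next pre_crypt()/post_crypt() round resumes mid-scatterlist.
	 */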
	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;

out:
	return err;
}

static int pre_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rctx *rctx = skcipher_request_ctx(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	bool more;
	__be32 *iv;
	u32 counter[4];
	int err;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, tfm);

	cryptlen = subreq->cryptlen;
	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, req->iv);

	err = skcipher_walk_virt(&w, subreq, false);
	iv = (__be32 *)w.iv;

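	/*
	 * Illustrative note (not from the original source): the IV carries the
	 * big-endian 128-bit block index I; unpack it into native u32 words,
	 * least significant first, so next_index() can increment it cheaply.
	 */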
	counter[0] = be32_to_cpu(iv[3]);
	counter[1] = be32_to_cpu(iv[2]);
	counter[2] = be32_to_cpu(iv[1]);
	counter[3] = be32_to_cpu(iv[0]);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;
			be128_xor(wdst++, &rctx->t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&rctx->t, &rctx->t,
				  &ctx->mulinc[next_index(counter)]);
		} while ((avail -= bs) >= bs);

		if (w.nbytes == w.total) {
			iv[0] = cpu_to_be32(counter[3]);
			iv[1] = cpu_to_be32(counter[2]);
			iv[2] = cpu_to_be32(counter[1]);
			iv[3] = cpu_to_be32(counter[0]);
		}

		err = skcipher_walk_done(&w, avail);
	}

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	rctx->src = rctx->srcbuf;

	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;

out:
	return err;
}

static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

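	/*
	 * Illustrative note (not from the original source): each chunk saves
	 * one 16-byte tweak per block, so for large requests try to kmalloc up
	 * to a page of tweak space; on failure fall back to the fixed
	 * LRW_BUFFER_SIZE buffer in struct rctx and process in smaller pieces.
	 */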
	subreq->cryptlen = LRW_BUFFER_SIZE;
	if (req->cryptlen > LRW_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table);

	return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;

	if (rctx->ext)
		kzfree(rctx->ext);
}

static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

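	/*
	 * Illustrative note (not from the original source): the request is
	 * handled in chunks: pre_crypt() XORs the tweaks into the data and
	 * saves them, the child cipher encrypts the result in ECB fashion,
	 * and post_crypt() XORs the saved tweaks back into the output.
	 */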
	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS || err == -EBUSY)
			return err;
	}

	exit_crypt(req);
	return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

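	/*
	 * Illustrative note (not from the original source): -EINPROGRESS here
	 * only signals that a backlogged subrequest has started; report it to
	 * the caller on the first chunk (nothing consumed yet) and otherwise
	 * keep waiting for the real completion.
	 */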
	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS || err == -EBUSY)
			return err;
	}

	exit_crypt(req);
	return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
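	/*
	 * Illustrative note (not from the original source): if the name was a
	 * bare cipher such as "aes" rather than a full skcipher, the lookup
	 * above fails with -ENOENT; retry with "ecb(<name>)" so that
	 * "lrw(aes)" is backed by ecb(aes) internally.
	 */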
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn, ecb_name, 0,
					   crypto_requires_sync(algt->type,
								algt->mask));
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_drop_spawn;

		if (ecb_name[len - 1] != ')')
			goto err_drop_spawn;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(__be32) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.create = create,
	.module = THIS_MODULE,
};

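/*
 * Illustrative usage (not from the original source): with this module loaded,
 * the mode is allocated by template name, e.g.
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
 *
 * keyed with the AES key followed by the 16-byte Key2.
 */
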
static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");