crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN
arch/arm64/crypto/ghash-ce-glue.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated GHASH implementation with ARMv8 PMULL instructions.
 *
 * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

MODULE_DESCRIPTION("GHASH and AES-GCM using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ghash");

#define GHASH_BLOCK_SIZE        16
#define GHASH_DIGEST_SIZE       16
#define GCM_IV_SIZE             12

struct ghash_key {
        u64                     h[2];
        u64                     h2[2];
        u64                     h3[2];
        u64                     h4[2];

        be128                   k;
};

struct ghash_desc_ctx {
        u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
        u8 buf[GHASH_BLOCK_SIZE];
        u32 count;
};

struct gcm_aes_ctx {
        struct crypto_aes_ctx   aes_key;
        struct ghash_key        ghash_key;
};

asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
                                       struct ghash_key const *k,
                                       const char *head);

asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
                                      struct ghash_key const *k,
                                      const char *head);

asmlinkage void pmull_gcm_encrypt(int bytes, u8 dst[], const u8 src[],
                                  struct ghash_key const *k, u64 dg[],
                                  u8 ctr[], u32 const rk[], int rounds,
                                  u8 tag[]);

asmlinkage void pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
                                  struct ghash_key const *k, u64 dg[],
                                  u8 ctr[], u32 const rk[], int rounds,
                                  u8 tag[]);

static int ghash_init(struct shash_desc *desc)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}

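/*
 * Fold @blocks 16-byte blocks of @src into the running digest dg[], preceded
 * by the optional buffered block @head (which counts as one extra block).
 * When the NEON unit is usable and a SIMD helper was supplied, the PMULL
 * assembler routine does the work; otherwise we fall back to the generic
 * gf128mul_lle() multiplication using the untransformed key in key->k.
 */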
static void ghash_do_update(int blocks, u64 dg[], const char *src,
                            struct ghash_key *key, const char *head,
                            void (*simd_update)(int blocks, u64 dg[],
                                                const char *src,
                                                struct ghash_key const *k,
                                                const char *head))
{
        if (likely(crypto_simd_usable() && simd_update)) {
                kernel_neon_begin();
                simd_update(blocks, dg, src, key, head);
                kernel_neon_end();
        } else {
                be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };

                do {
                        const u8 *in = src;

                        if (head) {
                                in = head;
                                blocks++;
                                head = NULL;
                        } else {
                                src += GHASH_BLOCK_SIZE;
                        }

                        crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
                        gf128mul_lle(&dst, &key->k);
                } while (--blocks);

                dg[0] = be64_to_cpu(dst.b);
                dg[1] = be64_to_cpu(dst.a);
        }
}

/* avoid hogging the CPU for too long */
#define MAX_BLOCKS      (SZ_64K / GHASH_BLOCK_SIZE)

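/*
 * Buffer sub-block input in ctx->buf and feed complete 16-byte blocks to
 * ghash_do_update(), at most MAX_BLOCKS at a time so preemption is not kept
 * disabled for too long while the NEON unit is in use.
 */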
static int __ghash_update(struct shash_desc *desc, const u8 *src,
                          unsigned int len,
                          void (*simd_update)(int blocks, u64 dg[],
                                              const char *src,
                                              struct ghash_key const *k,
                                              const char *head))
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        ctx->count += len;

        if ((partial + len) >= GHASH_BLOCK_SIZE) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);
                int blocks;

                if (partial) {
                        int p = GHASH_BLOCK_SIZE - partial;

                        memcpy(ctx->buf + partial, src, p);
                        src += p;
                        len -= p;
                }

                blocks = len / GHASH_BLOCK_SIZE;
                len %= GHASH_BLOCK_SIZE;

                do {
                        int chunk = min(blocks, MAX_BLOCKS);

                        ghash_do_update(chunk, ctx->digest, src, key,
                                        partial ? ctx->buf : NULL,
                                        simd_update);

                        blocks -= chunk;
                        src += chunk * GHASH_BLOCK_SIZE;
                        partial = 0;
                } while (unlikely(blocks > 0));
        }
        if (len)
                memcpy(ctx->buf + partial, src, len);
        return 0;
}

static int ghash_update_p8(struct shash_desc *desc, const u8 *src,
                           unsigned int len)
{
        return __ghash_update(desc, src, len, pmull_ghash_update_p8);
}

static int ghash_update_p64(struct shash_desc *desc, const u8 *src,
                            unsigned int len)
{
        return __ghash_update(desc, src, len, pmull_ghash_update_p64);
}

static int ghash_final_p8(struct shash_desc *desc, u8 *dst)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        if (partial) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);

                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);

                ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
                                pmull_ghash_update_p8);
        }
        put_unaligned_be64(ctx->digest[1], dst);
        put_unaligned_be64(ctx->digest[0], dst + 8);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}

static int ghash_final_p64(struct shash_desc *desc, u8 *dst)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        if (partial) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);

                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);

                ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
                                pmull_ghash_update_p64);
        }
        put_unaligned_be64(ctx->digest[1], dst);
        put_unaligned_be64(ctx->digest[0], dst + 8);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}

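/*
 * Massage a GHASH key (or a power of it) into the form expected by the PMULL
 * assembler routines: the 128-bit value is shifted left by one bit, and a bit
 * lost off the top is reduced back in via the GHASH polynomial constant
 * (0xc2 << 56).
 */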
static void ghash_reflect(u64 h[], const be128 *k)
{
        u64 carry = be64_to_cpu(k->a) & BIT(63) ? 1 : 0;

        h[0] = (be64_to_cpu(k->b) << 1) | carry;
        h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);

        if (carry)
                h[1] ^= 0xc200000000000000UL;
}

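/*
 * Precompute H, H^2, H^3 and H^4 in the reflected form above so the PMULL
 * code can process several blocks per iteration. A copy of the raw key is
 * kept in key->k for the non-SIMD fallback.
 */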
static int __ghash_setkey(struct ghash_key *key,
                          const u8 *inkey, unsigned int keylen)
{
        be128 h;

        /* needed for the fallback */
        memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);

        ghash_reflect(key->h, &key->k);

        h = key->k;
        gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h2, &h);

        gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h3, &h);

        gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h4, &h);

        return 0;
}

static int ghash_setkey(struct crypto_shash *tfm,
                        const u8 *inkey, unsigned int keylen)
{
        struct ghash_key *key = crypto_shash_ctx(tfm);

        if (keylen != GHASH_BLOCK_SIZE)
                return -EINVAL;

        return __ghash_setkey(key, inkey, keylen);
}

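/*
 * Two GHASH implementations: "ghash-neon" (priority 150) uses the 8-bit
 * polynomial multiply available in plain Advanced SIMD, while "ghash-ce"
 * (priority 200) uses the 64-bit PMULL instruction from the Crypto
 * Extensions and is only registered when that instruction is present.
 */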
static struct shash_alg ghash_alg[] = {{
        .base.cra_name          = "ghash",
        .base.cra_driver_name   = "ghash-neon",
        .base.cra_priority      = 150,
        .base.cra_blocksize     = GHASH_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct ghash_key),
        .base.cra_module        = THIS_MODULE,

        .digestsize             = GHASH_DIGEST_SIZE,
        .init                   = ghash_init,
        .update                 = ghash_update_p8,
        .final                  = ghash_final_p8,
        .setkey                 = ghash_setkey,
        .descsize               = sizeof(struct ghash_desc_ctx),
}, {
        .base.cra_name          = "ghash",
        .base.cra_driver_name   = "ghash-ce",
        .base.cra_priority      = 200,
        .base.cra_blocksize     = GHASH_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct ghash_key),
        .base.cra_module        = THIS_MODULE,

        .digestsize             = GHASH_DIGEST_SIZE,
        .init                   = ghash_init,
        .update                 = ghash_update_p64,
        .final                  = ghash_final_p64,
        .setkey                 = ghash_setkey,
        .descsize               = sizeof(struct ghash_desc_ctx),
}};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
        /*
         * # of rounds specified by AES:
         * 128 bit key          10 rounds
         * 192 bit key          12 rounds
         * 256 bit key          14 rounds
         * => n byte key        => 6 + (n/4) rounds
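         * e.g. a 24 byte (192 bit) key gives 6 + 24/4 = 12 rounds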
         */
        return 6 + ctx->key_length / 4;
}

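/*
 * Expand the AES key and derive the GHASH key H by encrypting an all-zero
 * block, as specified for GCM, then precompute its powers.
 */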
static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
                      unsigned int keylen)
{
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
        u8 key[GHASH_BLOCK_SIZE];
        int ret;

        ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
        if (ret)
                return -EINVAL;

        aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});

        return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
}

static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 8:
        case 12 ... 16:
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

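/*
 * Fold the associated data into the GHASH state dg[]: gcm_update_mac()
 * buffers sub-block leftovers between scatterlist entries, and
 * gcm_calculate_auth_mac() walks req->src over the first req->assoclen
 * bytes, zero-padding the final partial block.
 */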
static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
                           int *buf_count, struct gcm_aes_ctx *ctx)
{
        if (*buf_count > 0) {
                int buf_added = min(count, GHASH_BLOCK_SIZE - *buf_count);

                memcpy(&buf[*buf_count], src, buf_added);

                *buf_count += buf_added;
                src += buf_added;
                count -= buf_added;
        }

        if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
                int blocks = count / GHASH_BLOCK_SIZE;

                ghash_do_update(blocks, dg, src, &ctx->ghash_key,
                                *buf_count ? buf : NULL,
                                pmull_ghash_update_p64);

                src += blocks * GHASH_BLOCK_SIZE;
                count %= GHASH_BLOCK_SIZE;
                *buf_count = 0;
        }

        if (count > 0) {
                memcpy(buf, src, count);
                *buf_count = count;
        }
}

static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        u8 buf[GHASH_BLOCK_SIZE];
        struct scatter_walk walk;
        u32 len = req->assoclen;
        int buf_count = 0;

        scatterwalk_start(&walk, req->src);

        do {
                u32 n = scatterwalk_clamp(&walk, len);
                u8 *p;

                if (!n) {
                        scatterwalk_start(&walk, sg_next(walk.sg));
                        n = scatterwalk_clamp(&walk, len);
                }
                p = scatterwalk_map(&walk);

                gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
                len -= n;

                scatterwalk_unmap(p);
                scatterwalk_advance(&walk, n);
                scatterwalk_done(&walk, 0, len);
        } while (len);

        if (buf_count) {
                memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
                ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL,
                                pmull_ghash_update_p64);
        }
}

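/*
 * GCM encryption: CTR-encrypt the plaintext and GHASH the resulting
 * ciphertext. The 32-bit counter in the last word of the IV starts at 2;
 * counter value 1 is reserved for encrypting the final authentication tag.
 * When the NEON unit is usable, the fused pmull_gcm_encrypt() routine
 * handles bulk data, tail and tag; otherwise everything is done with the
 * scalar AES library code and the generic GHASH fallback.
 */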
static int gcm_encrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        int nrounds = num_rounds(&ctx->aes_key);
        struct skcipher_walk walk;
        u8 buf[AES_BLOCK_SIZE];
        u8 iv[AES_BLOCK_SIZE];
        u64 dg[2] = {};
        u128 lengths;
        u8 *tag;
        int err;

        lengths.a = cpu_to_be64(req->assoclen * 8);
        lengths.b = cpu_to_be64(req->cryptlen * 8);

        if (req->assoclen)
                gcm_calculate_auth_mac(req, dg);

        memcpy(iv, req->iv, GCM_IV_SIZE);
        put_unaligned_be32(2, iv + GCM_IV_SIZE);

        err = skcipher_walk_aead_encrypt(&walk, req, false);

        if (likely(crypto_simd_usable())) {
                do {
                        const u8 *src = walk.src.virt.addr;
                        u8 *dst = walk.dst.virt.addr;
                        int nbytes = walk.nbytes;

                        tag = (u8 *)&lengths;

                        if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
                                src = dst = memcpy(buf + sizeof(buf) - nbytes,
                                                   src, nbytes);
                        } else if (nbytes < walk.total) {
                                nbytes &= ~(AES_BLOCK_SIZE - 1);
                                tag = NULL;
                        }

                        kernel_neon_begin();
                        pmull_gcm_encrypt(nbytes, dst, src, &ctx->ghash_key, dg,
                                          iv, ctx->aes_key.key_enc, nrounds,
                                          tag);
                        kernel_neon_end();

                        if (unlikely(!nbytes))
                                break;

                        if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
                                memcpy(walk.dst.virt.addr,
                                       buf + sizeof(buf) - nbytes, nbytes);

                        err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
                } while (walk.nbytes);
        } else {
                while (walk.nbytes >= AES_BLOCK_SIZE) {
                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
                        const u8 *src = walk.src.virt.addr;
                        u8 *dst = walk.dst.virt.addr;
                        int remaining = blocks;

                        do {
                                aes_encrypt(&ctx->aes_key, buf, iv);
                                crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
                                crypto_inc(iv, AES_BLOCK_SIZE);

                                dst += AES_BLOCK_SIZE;
                                src += AES_BLOCK_SIZE;
                        } while (--remaining > 0);

                        ghash_do_update(blocks, dg, walk.dst.virt.addr,
                                        &ctx->ghash_key, NULL, NULL);

                        err = skcipher_walk_done(&walk,
                                                 walk.nbytes % AES_BLOCK_SIZE);
                }

                /* handle the tail */
                if (walk.nbytes) {
                        aes_encrypt(&ctx->aes_key, buf, iv);

                        crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr,
                                       buf, walk.nbytes);

                        memcpy(buf, walk.dst.virt.addr, walk.nbytes);
                        memset(buf + walk.nbytes, 0, sizeof(buf) - walk.nbytes);
                }

                tag = (u8 *)&lengths;
                ghash_do_update(1, dg, tag, &ctx->ghash_key,
                                walk.nbytes ? buf : NULL, NULL);

                if (walk.nbytes)
                        err = skcipher_walk_done(&walk, 0);

                put_unaligned_be64(dg[1], tag);
                put_unaligned_be64(dg[0], tag + 8);
                put_unaligned_be32(1, iv + GCM_IV_SIZE);
                aes_encrypt(&ctx->aes_key, iv, iv);
                crypto_xor(tag, iv, AES_BLOCK_SIZE);
        }

        if (err)
                return err;

        /* copy authtag to end of dst */
        scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
                                 crypto_aead_authsize(aead), 1);

        return 0;
}

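/*
 * GCM decryption mirrors gcm_encrypt(), except that GHASH is computed over
 * the ciphertext before it is CTR-decrypted, and the calculated tag is
 * compared against the one stored at the end of the source scatterlist
 * using crypto_memneq().
 */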
static int gcm_decrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int authsize = crypto_aead_authsize(aead);
        int nrounds = num_rounds(&ctx->aes_key);
        struct skcipher_walk walk;
        u8 buf[AES_BLOCK_SIZE];
        u8 iv[AES_BLOCK_SIZE];
        u64 dg[2] = {};
        u128 lengths;
        u8 *tag;
        int err;

        lengths.a = cpu_to_be64(req->assoclen * 8);
        lengths.b = cpu_to_be64((req->cryptlen - authsize) * 8);

        if (req->assoclen)
                gcm_calculate_auth_mac(req, dg);

        memcpy(iv, req->iv, GCM_IV_SIZE);
        put_unaligned_be32(2, iv + GCM_IV_SIZE);

        err = skcipher_walk_aead_decrypt(&walk, req, false);

        if (likely(crypto_simd_usable())) {
                do {
                        const u8 *src = walk.src.virt.addr;
                        u8 *dst = walk.dst.virt.addr;
                        int nbytes = walk.nbytes;

                        tag = (u8 *)&lengths;

                        if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE)) {
                                src = dst = memcpy(buf + sizeof(buf) - nbytes,
                                                   src, nbytes);
                        } else if (nbytes < walk.total) {
                                nbytes &= ~(AES_BLOCK_SIZE - 1);
                                tag = NULL;
                        }

                        kernel_neon_begin();
                        pmull_gcm_decrypt(nbytes, dst, src, &ctx->ghash_key, dg,
                                          iv, ctx->aes_key.key_enc, nrounds,
                                          tag);
                        kernel_neon_end();

                        if (unlikely(!nbytes))
                                break;

                        if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
                                memcpy(walk.dst.virt.addr,
                                       buf + sizeof(buf) - nbytes, nbytes);

                        err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
                } while (walk.nbytes);
        } else {
                while (walk.nbytes >= AES_BLOCK_SIZE) {
                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
                        const u8 *src = walk.src.virt.addr;
                        u8 *dst = walk.dst.virt.addr;

                        ghash_do_update(blocks, dg, walk.src.virt.addr,
                                        &ctx->ghash_key, NULL, NULL);

                        do {
                                aes_encrypt(&ctx->aes_key, buf, iv);
                                crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
                                crypto_inc(iv, AES_BLOCK_SIZE);

                                dst += AES_BLOCK_SIZE;
                                src += AES_BLOCK_SIZE;
                        } while (--blocks > 0);

                        err = skcipher_walk_done(&walk,
                                                 walk.nbytes % AES_BLOCK_SIZE);
                }

                /* handle the tail */
                if (walk.nbytes) {
                        memcpy(buf, walk.src.virt.addr, walk.nbytes);
                        memset(buf + walk.nbytes, 0, sizeof(buf) - walk.nbytes);
                }

                tag = (u8 *)&lengths;
                ghash_do_update(1, dg, tag, &ctx->ghash_key,
                                walk.nbytes ? buf : NULL, NULL);

                if (walk.nbytes) {
                        aes_encrypt(&ctx->aes_key, buf, iv);

                        crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr,
                                       buf, walk.nbytes);

                        err = skcipher_walk_done(&walk, 0);
                }

                put_unaligned_be64(dg[1], tag);
                put_unaligned_be64(dg[0], tag + 8);
                put_unaligned_be32(1, iv + GCM_IV_SIZE);
                aes_encrypt(&ctx->aes_key, iv, iv);
                crypto_xor(tag, iv, AES_BLOCK_SIZE);
        }

        if (err)
                return err;

        /* compare calculated auth tag with the stored one */
        scatterwalk_map_and_copy(buf, req->src,
                                 req->assoclen + req->cryptlen - authsize,
                                 authsize, 0);

        if (crypto_memneq(tag, buf, authsize))
                return -EBADMSG;
        return 0;
}

static struct aead_alg gcm_aes_alg = {
        .ivsize                 = GCM_IV_SIZE,
        .chunksize              = AES_BLOCK_SIZE,
        .maxauthsize            = AES_BLOCK_SIZE,
        .setkey                 = gcm_setkey,
        .setauthsize            = gcm_setauthsize,
        .encrypt                = gcm_encrypt,
        .decrypt                = gcm_decrypt,

        .base.cra_name          = "gcm(aes)",
        .base.cra_driver_name   = "gcm-aes-ce",
        .base.cra_priority      = 300,
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct gcm_aes_ctx),
        .base.cra_module        = THIS_MODULE,
};

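/*
 * Whenever Advanced SIMD is available, the NEON-only GHASH implementation is
 * registered; the PMULL-based GHASH and the AES-GCM AEAD are registered only
 * if the 64-bit polynomial multiply instruction is also present.
 */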
static int __init ghash_ce_mod_init(void)
{
        int ret;

        if (!cpu_have_named_feature(ASIMD))
                return -ENODEV;

        if (cpu_have_named_feature(PMULL))
                ret = crypto_register_shashes(ghash_alg,
                                              ARRAY_SIZE(ghash_alg));
        else
                /* only register the first array element */
                ret = crypto_register_shash(ghash_alg);

        if (ret)
                return ret;

        if (cpu_have_named_feature(PMULL)) {
                ret = crypto_register_aead(&gcm_aes_alg);
                if (ret)
                        crypto_unregister_shashes(ghash_alg,
                                                  ARRAY_SIZE(ghash_alg));
        }
        return ret;
}

static void __exit ghash_ce_mod_exit(void)
{
        if (cpu_have_named_feature(PMULL))
                crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg));
        else
                crypto_unregister_shash(ghash_alg);
        crypto_unregister_aead(&gcm_aes_alg);
}

static const struct cpu_feature ghash_cpu_feature[] = {
        { cpu_feature(PMULL) }, { }
};
MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature);

module_init(ghash_ce_mod_init);
module_exit(ghash_ce_mod_exit);