arch/x86/crypto/morus1280_glue.c
/*
 * The MORUS-1280 Authenticated-Encryption Algorithm
 *   Common x86 SIMD glue skeleton
 *
 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus1280_glue.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/fpu/api.h>

struct morus1280_state {
        struct morus1280_block s[MORUS_STATE_BLOCKS];
};

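/*
 * Per-request operation table: selects the skcipher walk initializer
 * (encrypt vs. decrypt) together with the matching SIMD routines for
 * whole blocks and for the final partial block.
 */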
struct morus1280_ops {
        int (*skcipher_walk_init)(struct skcipher_walk *walk,
                                  struct aead_request *req, bool atomic);

        void (*crypt_blocks)(void *state, const void *src, void *dst,
                             unsigned int length);
        void (*crypt_tail)(void *state, const void *src, void *dst,
                           unsigned int length);
};

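/*
 * Absorb the associated data through the SIMD ->ad() hook. Data from the
 * scatterlist is accumulated in 'buf' until a full MORUS1280_BLOCK_SIZE
 * block is available; runs of whole blocks are passed through directly,
 * and a zero-padded final partial block is absorbed at the end.
 */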
static void crypto_morus1280_glue_process_ad(
                struct morus1280_state *state,
                const struct morus1280_glue_ops *ops,
                struct scatterlist *sg_src, unsigned int assoclen)
{
        struct scatter_walk walk;
        struct morus1280_block buf;
        unsigned int pos = 0;

        scatterwalk_start(&walk, sg_src);
        while (assoclen != 0) {
                unsigned int size = scatterwalk_clamp(&walk, assoclen);
                unsigned int left = size;
                void *mapped = scatterwalk_map(&walk);
                const u8 *src = (const u8 *)mapped;

                if (pos + size >= MORUS1280_BLOCK_SIZE) {
                        if (pos > 0) {
                                unsigned int fill = MORUS1280_BLOCK_SIZE - pos;
                                memcpy(buf.bytes + pos, src, fill);
                                ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE);
                                pos = 0;
                                left -= fill;
                                src += fill;
                        }

                        ops->ad(state, src, left);
                        src += left & ~(MORUS1280_BLOCK_SIZE - 1);
                        left &= MORUS1280_BLOCK_SIZE - 1;
                }

                memcpy(buf.bytes + pos, src, left);

                pos += left;
                assoclen -= size;
                scatterwalk_unmap(mapped);
                scatterwalk_advance(&walk, size);
                scatterwalk_done(&walk, 0, assoclen);
        }

        if (pos > 0) {
                memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos);
                ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE);
        }
}

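/*
 * Transform the payload mapped by the skcipher walk: full blocks go
 * through ->crypt_blocks(), any remaining partial block through
 * ->crypt_tail().
 */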
static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
                                                struct morus1280_ops ops,
                                                struct skcipher_walk *walk)
{
        while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
                ops.crypt_blocks(state, walk->src.virt.addr,
                                 walk->dst.virt.addr,
                                 round_down(walk->nbytes,
                                            MORUS1280_BLOCK_SIZE));
                skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
        }

        if (walk->nbytes) {
                ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
                               walk->nbytes);
                skcipher_walk_done(walk, 0);
        }
}

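/*
 * A full MORUS1280_BLOCK_SIZE key is used as-is; a half-size key is
 * duplicated to fill the key block. Any other length is rejected.
 */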
int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
                                 unsigned int keylen)
{
        struct morus1280_ctx *ctx = crypto_aead_ctx(aead);

        if (keylen == MORUS1280_BLOCK_SIZE) {
                memcpy(ctx->key.bytes, key, MORUS1280_BLOCK_SIZE);
        } else if (keylen == MORUS1280_BLOCK_SIZE / 2) {
                memcpy(ctx->key.bytes, key, keylen);
                memcpy(ctx->key.bytes + keylen, key, keylen);
        } else {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setkey);

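/* Tag lengths up to MORUS_MAX_AUTH_SIZE are accepted. */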
int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setauthsize);

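/*
 * Common encrypt/decrypt path: initialize the state from the key and IV,
 * absorb the associated data, transform the payload and finalize the tag,
 * all inside a single kernel_fpu_begin()/kernel_fpu_end() section so the
 * SIMD registers are usable by the assembly routines.
 */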
static void crypto_morus1280_glue_crypt(struct aead_request *req,
                                        struct morus1280_ops ops,
                                        unsigned int cryptlen,
                                        struct morus1280_block *tag_xor)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus1280_state state;
        struct skcipher_walk walk;

        ops.skcipher_walk_init(&walk, req, true);

        kernel_fpu_begin();

        ctx->ops->init(&state, &ctx->key, req->iv);
        crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src,
                                         req->assoclen);
        crypto_morus1280_glue_process_crypt(&state, ops, &walk);
        ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);

        kernel_fpu_end();
}

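/*
 * Encrypt the plaintext and write the computed tag into the destination
 * scatterlist right after the ciphertext (at offset assoclen + cryptlen).
 */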
int crypto_morus1280_glue_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus1280_ops ops = {
                .skcipher_walk_init = skcipher_walk_aead_encrypt,
                .crypt_blocks = ctx->ops->enc,
                .crypt_tail = ctx->ops->enc_tail,
        };

        struct morus1280_block tag = {};
        unsigned int authsize = crypto_aead_authsize(tfm);
        unsigned int cryptlen = req->cryptlen;

        crypto_morus1280_glue_crypt(req, ops, cryptlen, &tag);

        scatterwalk_map_and_copy(tag.bytes, req->dst,
                                 req->assoclen + cryptlen, authsize, 1);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_encrypt);

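/*
 * Decrypt the ciphertext with the received tag fed in as tag_xor; an
 * authentic message leaves an all-zero result, which is compared in
 * constant time via crypto_memneq().
 */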
int crypto_morus1280_glue_decrypt(struct aead_request *req)
{
        static const u8 zeros[MORUS1280_BLOCK_SIZE] = {};

        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus1280_ops ops = {
                .skcipher_walk_init = skcipher_walk_aead_decrypt,
                .crypt_blocks = ctx->ops->dec,
                .crypt_tail = ctx->ops->dec_tail,
        };

        struct morus1280_block tag;
        unsigned int authsize = crypto_aead_authsize(tfm);
        unsigned int cryptlen = req->cryptlen - authsize;

        scatterwalk_map_and_copy(tag.bytes, req->src,
                                 req->assoclen + cryptlen, authsize, 0);

        crypto_morus1280_glue_crypt(req, ops, cryptlen, &tag);

        return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_decrypt);

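/* Store the ISA-specific operation table in the transform context. */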
void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
                                    const struct morus1280_glue_ops *ops)
{
        struct morus1280_ctx *ctx = crypto_aead_ctx(aead);
        ctx->ops = ops;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops);
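
/*
 * Illustrative sketch (not part of the original file): an ISA-specific
 * glue module is expected to expose its assembly entry points through a
 * struct morus1280_glue_ops and install it from its init_tfm callback via
 * crypto_morus1280_glue_init_ops(). The *_sse2_* names below are
 * assumptions used only for illustration.
 *
 *     static const struct morus1280_glue_ops crypto_morus1280_sse2_ops = {
 *             .init     = crypto_morus1280_sse2_init,
 *             .ad       = crypto_morus1280_sse2_ad,
 *             .enc      = crypto_morus1280_sse2_enc,
 *             .enc_tail = crypto_morus1280_sse2_enc_tail,
 *             .dec      = crypto_morus1280_sse2_dec,
 *             .dec_tail = crypto_morus1280_sse2_dec_tail,
 *             .final    = crypto_morus1280_sse2_final,
 *     };
 *
 *     static int crypto_morus1280_sse2_init_tfm(struct crypto_aead *tfm)
 *     {
 *             crypto_morus1280_glue_init_ops(tfm, &crypto_morus1280_sse2_ops);
 *             return 0;
 *     }
 */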

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations");