// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

#define AESNI_ALIGN	16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
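/*
 * The generic crypto API only guarantees CRYPTO_MINALIGN alignment for tfm
 * context memory, so reserve enough slack to realign the AES context to a
 * 16-byte boundary by hand (see aes_ctx() below).
 */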
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)

/*
 * This data is stored at the end of the crypto_tfm struct.
 * It is a per-"session" data storage location.
 * It needs to be 16-byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
	u8 nonce[4];
};

struct generic_gcmaes_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

#define GCM_BLOCK_LEN 16

struct gcm_context_data {
	/* init, update and finalize context data */
	u8 aad_hash[GCM_BLOCK_LEN];
	u64 aad_length;
	u64 in_length;
	u8 partial_block_enc_key[GCM_BLOCK_LEN];
	u8 orig_IV[GCM_BLOCK_LEN];
	u8 current_counter[GCM_BLOCK_LEN];
	u64 partial_block_len;
	u64 unused;
	u8 hash_keys[GCM_BLOCK_LEN * 16];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data.  May be uninitialized.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data.  May be uninitialized.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
 *         16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *          to be 8 or 12 bytes.
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *          Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx,
			struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* Scatter / Gather routines, with args similar to above */
asmlinkage void aesni_gcm_init(void *ctx,
			       struct gcm_context_data *gdata,
			       u8 *iv,
			       u8 *hash_subkey, const u8 *aad,
			       unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

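/*
 * Dispatch table for the GCM asm routines above; aesni_gcm_tfm is pointed
 * at the SSE, AVX, or AVX2 variant at module init time based on CPU
 * features.
 */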
static const struct aesni_gcm_tfm_s {
	void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
		     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
	void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
			   const u8 *in, unsigned long plaintext_len);
	void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
			   const u8 *in, unsigned long ciphertext_len);
	void (*finalize)(void *ctx, struct gcm_context_data *gdata,
			 u8 *auth_tag, unsigned long auth_tag_len);
} *aesni_gcm_tfm;

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
	.init = &aesni_gcm_init,
	.enc_update = &aesni_gcm_enc_update,
	.dec_update = &aesni_gcm_dec_update,
	.finalize = &aesni_gcm_finalize,
};

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_init_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
	.init = &aesni_gcm_init_avx_gen2,
	.enc_update = &aesni_gcm_enc_update_avx_gen2,
	.dec_update = &aesni_gcm_dec_update_avx_gen2,
	.finalize = &aesni_gcm_finalize_avx_gen2,
};

#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_init_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
				struct gcm_context_data *gdata, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
	.init = &aesni_gcm_init_avx_gen4,
	.enc_update = &aesni_gcm_enc_update_avx_gen4,
	.dec_update = &aesni_gcm_dec_update_avx_gen4,
	.finalize = &aesni_gcm_finalize_avx_gen4,
};

#endif

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

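/*
 * Round a raw context pointer up to the next AESNI_ALIGN boundary, unless
 * the API's own context alignment already satisfies it.
 */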
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!crypto_simd_usable())
		err = aes_expandkey(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable()) {
		aes_encrypt(ctx, dst, src);
	} else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable()) {
		aes_decrypt(ctx, dst, src);
	} else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	return aes_set_key_common(crypto_skcipher_tfm(tfm),
				  crypto_skcipher_ctx(tfm), key, len);
}

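/*
 * The ECB/CBC handlers below all follow the same pattern: walk the request
 * in virtually mapped chunks, process every full AES block with the asm
 * routines, and hand any remaining partial block back to the walker.
 */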
static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
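/*
 * CTR mode needs no padding: encrypt the current counter block and XOR as
 * many keystream bytes as there are input bytes left.
 */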
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct skcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance
	 * aes_set_key_common() ensures that key length is one of
	 * {128,192,256}
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = skcipher_walk_done(&walk, 0);
	}
	kernel_fpu_end();

	return err;
}

static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
				 key, keylen);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
				  key + keylen, keylen);
}

static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};

static int xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_enc_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx),
				   false);
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	return glue_xts_req_128bit(&aesni_dec_xts, req,
				   XTS_TWEAK_CAST(aesni_xts_tweak),
				   aes_ctx(ctx->raw_tweak_ctx),
				   aes_ctx(ctx->raw_crypt_ctx),
				   true);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_aes_ctx ctx;
	int ret;

	ret = aes_expandkey(&ctx, key, key_len);
	if (ret)
		return ret;

	/*
	 * Zero the hash sub key container; enciphering an all-zero block
	 * produces the hash sub key.
	 */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	aes_encrypt(&ctx, hash_subkey, hash_subkey);

	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4-byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

/*
 * This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long.
 */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
				       unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

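/*
 * Scatter-gather GCM: linearize the AAD (mapping it directly when it is
 * already contiguous and not in highmem), stream the payload through the
 * init/update/finalize asm interface one scatterlist entry at a time, then
 * emit (encrypt) or verify (decrypt) the authentication tag.
 */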
static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
			      unsigned int assoclen, u8 *hash_subkey,
			      u8 *iv, void *aes_ctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
	struct gcm_context_data data AESNI_ALIGN_ATTR;
	struct scatter_walk dst_sg_walk = {};
	unsigned long left = req->cryptlen;
	unsigned long len, srclen, dstlen;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk src_sg_walk;
	struct scatterlist src_start[2];
	struct scatterlist dst_start[2];
	struct scatterlist *src_sg;
	struct scatterlist *dst_sg;
	u8 *src, *dst, *assoc;
	u8 *assocmem = NULL;
	u8 authTag[16];

	if (!enc)
		left -= auth_tag_len;

#ifdef CONFIG_AS_AVX2
	if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
		gcm_tfm = &aesni_gcm_tfm_avx_gen2;
#endif
#ifdef CONFIG_AS_AVX
	if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
		gcm_tfm = &aesni_gcm_tfm_sse;
#endif

	/* Linearize assoc, if not already linear */
	if (req->src->length >= assoclen && req->src->length &&
		(!PageHighMem(sg_page(req->src)) ||
			req->src->offset + req->src->length <= PAGE_SIZE)) {
		scatterwalk_start(&assoc_sg_walk, req->src);
		assoc = scatterwalk_map(&assoc_sg_walk);
	} else {
		/* assoc can be any length, so must be on heap */
		assocmem = kmalloc(assoclen, GFP_ATOMIC);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

	if (left) {
		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
		scatterwalk_start(&src_sg_walk, src_sg);
		if (req->src != req->dst) {
			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
						  req->assoclen);
			scatterwalk_start(&dst_sg_walk, dst_sg);
		}
	}

	kernel_fpu_begin();
	gcm_tfm->init(aes_ctx, &data, iv,
		hash_subkey, assoc, assoclen);
	if (req->src != req->dst) {
		while (left) {
			src = scatterwalk_map(&src_sg_walk);
			dst = scatterwalk_map(&dst_sg_walk);
			srclen = scatterwalk_clamp(&src_sg_walk, left);
			dstlen = scatterwalk_clamp(&dst_sg_walk, left);
			len = min(srclen, dstlen);
			if (len) {
				if (enc)
					gcm_tfm->enc_update(aes_ctx, &data,
							     dst, src, len);
				else
					gcm_tfm->dec_update(aes_ctx, &data,
							     dst, src, len);
			}
			left -= len;

			scatterwalk_unmap(src);
			scatterwalk_unmap(dst);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_advance(&dst_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 0, left);
			scatterwalk_done(&dst_sg_walk, 1, left);
		}
	} else {
		while (left) {
			dst = src = scatterwalk_map(&src_sg_walk);
			len = scatterwalk_clamp(&src_sg_walk, left);
			if (len) {
				if (enc)
					gcm_tfm->enc_update(aes_ctx, &data,
							     src, src, len);
				else
					gcm_tfm->dec_update(aes_ctx, &data,
							     src, src, len);
			}
			left -= len;
			scatterwalk_unmap(src);
			scatterwalk_advance(&src_sg_walk, len);
			scatterwalk_done(&src_sg_walk, 1, left);
		}
	}
	gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
	kernel_fpu_end();

	if (!assocmem)
		scatterwalk_unmap(assoc);
	else
		kfree(assocmem);

	if (!enc) {
		u8 authTagMsg[16];

		/* Copy out original authTag */
		scatterwalk_map_and_copy(authTagMsg, req->src,
					 req->assoclen + req->cryptlen -
					 auth_tag_len,
					 auth_tag_len, 0);

		/* Compare generated tag with passed in tag. */
		return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
			-EBADMSG : 0;
	}

	/* Copy in the authTag */
	scatterwalk_map_and_copy(authTag, req->dst,
				 req->assoclen + req->cryptlen,
				 auth_tag_len, 1);

	return 0;
}

static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
				aes_ctx);
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
				aes_ctx);
}

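/*
 * RFC4106 builds the GCM pre-counter block from the 4-byte salt taken from
 * the end of the key, the 8-byte per-request IV, and a 32-bit counter of 1.
 */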
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	unsigned int i;
	__be32 counter = cpu_to_be32(1);

	/*
	 * Assuming we support rfc4106 64-bit extended sequence numbers,
	 * the AAD length must be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	unsigned int i;

	/*
	 * Assuming we support rfc4106 64-bit extended sequence numbers,
	 * the AAD length must be 16 or 20 bytes.
	 */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}
#endif

static struct crypto_alg aesni_cipher_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aesni_encrypt,
			.cia_decrypt		= aesni_decrypt
		}
	}
};

static struct skcipher_alg aesni_skciphers[] = {
	{
		.base = {
			.cra_name		= "__ecb(aes)",
			.cra_driver_name	= "__ecb-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	}, {
		.base = {
			.cra_name		= "__cbc(aes)",
			.cra_driver_name	= "__cbc-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
#ifdef CONFIG_X86_64
	}, {
		.base = {
			.cra_name		= "__ctr(aes)",
			.cra_driver_name	= "__ctr-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= 1,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.chunksize	= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ctr_crypt,
		.decrypt	= ctr_crypt,
	}, {
		.base = {
			.cra_name		= "__xts(aes)",
			.cra_driver_name	= "__xts-aes-aesni",
			.cra_priority		= 401,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= XTS_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= xts_aesni_setkey,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
#endif
	}
};

static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

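/*
 * For a 12-byte IV, GCM defines the pre-counter block J0 as
 * IV || 0x00000001; build it here before calling into the asm code.
 */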
static int generic_gcmaes_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	__be32 counter = cpu_to_be32(1);

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static struct aead_alg aesni_aeads[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= GCM_RFC4106_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__rfc4106(gcm(aes))",
		.cra_driver_name	= "__rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.setkey			= generic_gcmaes_set_key,
	.setauthsize		= generic_gcmaes_set_authsize,
	.encrypt		= generic_gcmaes_encrypt,
	.decrypt		= generic_gcmaes_decrypt,
	.ivsize			= GCM_AES_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm(aes)",
		.cra_driver_name	= "__generic-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aeads[0];
#endif

static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

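/*
 * Pick the widest GCM implementation the CPU supports (AVX2 > AVX > SSE)
 * and the by8 CTR variant when AVX is available, then register the
 * algorithms; the SIMD wrappers defer to cryptd when the FPU is not usable.
 */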
static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_tfm = &aesni_gcm_tfm_sse;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_register_alg(&aesni_cipher_alg);
	if (err)
		return err;

	err = simd_register_skciphers_compat(aesni_skciphers,
					     ARRAY_SIZE(aesni_skciphers),
					     aesni_simd_skciphers);
	if (err)
		goto unregister_cipher;

	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
					 aesni_simd_aeads);
	if (err)
		goto unregister_skciphers;

	return 0;

unregister_skciphers:
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
unregister_cipher:
	crypto_unregister_alg(&aesni_cipher_alg);
	return err;
}

static void __exit aesni_exit(void)
{
	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
			      aesni_simd_aeads);
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
	crypto_unregister_alg(&aesni_cipher_alg);
}

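/*
 * A minimal usage sketch (not part of this driver): kernel users reach
 * these implementations through the generic crypto API, where names such
 * as "cbc(aes)" or "gcm(aes)" resolve to the SIMD wrappers registered
 * above, e.g.:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	...
 *	crypto_free_skcipher(tfm);
 */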
late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");