/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <asm/i387.h>
#include <asm/aes.h>

struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};

#define AESNI_ALIGN 16
#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

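/*
 * The AES-NI instructions operate on the SSE/XMM register set and may only
 * be issued between kernel_fpu_begin() and kernel_fpu_end().  In interrupt
 * context with CR0.TS clear, the interrupted task's FPU state is live in
 * the registers and kernel_fpu_begin() must not be used; callers check
 * this condition here and fall back to a non-SSE implementation instead.
 */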
static inline int kernel_fpu_using(void)
{
	if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
		return 1;
	return 0;
}

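/*
 * The tfm context is only guaranteed to be aligned to
 * crypto_tfm_ctx_alignment(), while the assembler routines expect the AES
 * key schedule at AESNI_ALIGN (16-byte) alignment.  cra_ctxsize is
 * therefore padded with AESNI_ALIGN - 1 extra bytes, and this helper
 * rounds the raw context pointer up to the required boundary.
 */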
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

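/*
 * Key expansion: reject anything but 128/192/256-bit keys, then build the
 * key schedule with the AES-NI assembler routine when the FPU is usable,
 * or with the generic C key expansion otherwise.  Both fill in the same
 * struct crypto_aes_ctx layout, so either path can serve later en/decrypt
 * calls.
 */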
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (kernel_fpu_using())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

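/*
 * Single-block cipher operations for the synchronous "aes" algorithm.
 * The crypto_aes_{en,de}crypt_x86() fallbacks do not touch FPU/SSE state,
 * so they are safe when kernel_fpu_begin() is unavailable.
 */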
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (kernel_fpu_using())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (kernel_fpu_using())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static struct crypto_alg aesni_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(aesni_alg.cra_list),
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt
		}
	}
};

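/*
 * ECB mode: the blkcipher walk maps the scatterlists chunk by chunk; each
 * iteration processes the complete AES blocks of the current chunk
 * (nbytes & AES_BLOCK_MASK) and hands any partial remainder back to
 * blkcipher_walk_done().  The whole walk runs inside a single
 * kernel_fpu_begin()/kernel_fpu_end() section.
 */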
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

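/*
 * The "__"-prefixed, priority-0 algorithms below are internal helpers:
 * they are meant to be instantiated by driver name from the ablkcipher
 * wrappers (directly or via cryptd), not picked up by generic name
 * lookup.
 */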
static struct crypto_alg blk_ecb_alg = {
	.cra_name = "__ecb-aes-aesni",
	.cra_driver_name = "__driver-ecb-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
};

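/*
 * CBC mode mirrors the ECB helpers; the chaining value lives in walk.iv
 * and is passed to the assembler routines through their iv argument on
 * every chunk.
 */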
static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_cbc_alg = {
	.cra_name = "__cbc-aes-aesni",
	.cra_driver_name = "__driver-cbc-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
};

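/*
 * Asynchronous ablkcipher wrappers.  setkey is forwarded to the cryptd
 * transform, which in turn programs the underlying
 * "__driver-*-aes-aesni" blkcipher.
 */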
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	return crypto_ablkcipher_setkey(&ctx->cryptd_tfm->base, key, key_len);
}

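/*
 * If the FPU is currently unusable, re-target a copy of the request at
 * the cryptd transform so a worker thread can process it later in
 * process context; otherwise run the underlying blkcipher synchronously
 * right here.  The copy lives in the request context, which
 * ablk_init_common() sized for exactly this purpose.
 */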
static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (kernel_fpu_using()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (kernel_fpu_using()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

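/*
 * Common ablkcipher setup: stash the cryptd transform and reserve enough
 * request context for the nested ablkcipher_request used on the deferred
 * path above.
 */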
static void ablk_init_common(struct crypto_tfm *tfm,
			     struct cryptd_ablkcipher *cryptd_tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}

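/*
 * cryptd_alloc_ablkcipher() should wrap the named algorithm in a cryptd
 * instance (i.e. "cryptd(__driver-ecb-aes-aesni)" here, and the CBC
 * equivalent below), driving the internal blkciphers registered earlier.
 */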
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ecb_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
	.cra_init = ablk_ecb_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
	.cra_init = ablk_cbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};

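/*
 * Module init: bail out if the CPUID AES feature flag is absent, then
 * register the plain cipher, the two internal blkciphers, and the two
 * exported ablkcipher wrappers.  Failures unwind in reverse registration
 * order through the labels below.
 */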
static int __init aesni_init(void)
{
	int err;

	if (!cpu_has_aes) {
		printk(KERN_ERR "Intel AES-NI instructions are not detected.\n");
		return -ENODEV;
	}
	if ((err = crypto_register_alg(&aesni_alg)))
		goto aes_err;
	if ((err = crypto_register_alg(&blk_ecb_alg)))
		goto blk_ecb_err;
	if ((err = crypto_register_alg(&blk_cbc_alg)))
		goto blk_cbc_err;
	if ((err = crypto_register_alg(&ablk_ecb_alg)))
		goto ablk_ecb_err;
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
		goto ablk_cbc_err;

	return err;

ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
	crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
	return err;
}

static void __exit aesni_exit(void)
{
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&aesni_alg);
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");