/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <asm/arch/npe.h>
#include <asm/arch/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define MAX_IVLEN   16
#define NPE_ID      2  /* NPE C */
#define NPE_QLEN    16
/* Space for registration (key setup) requests
 * issued while the first NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID 29
#define RECV_QID 30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_IPAD_VALUE   0x36
#define HMAC_OPAD_VALUE   0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

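/* Scatter descriptor consumed by the NPE: the hardware follows the
 * chain through phys_next/phys_addr, while the host keeps a parallel
 * virtual link in ->next for freeing.  (Layout inferred from how the
 * driver builds and tears down the chains below.)
 */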
struct buffer_desc {
	u32 phys_next;
	u16 buf_len;
	u16 pkt_len;
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
};

struct crypt_ctl {
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* En-/decryption start offset */
	u16 crypt_len;		/* En-/decryption data length */
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	unsigned src_nents;
	unsigned dst_nents;
};

struct aead_ctx {
	struct buffer_desc *buffer;
	unsigned short assoc_nents;
	unsigned short src_nents;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

static void dev_release(struct device *dev)
{
}

#define DRIVER_NAME "ixp4xx_crypto"
static struct platform_device pseudo_dev = {
	.name = DRIVER_NAME,
	.id   = 0,
	.num_resources = 0,
	.dev  = {
		.coherent_dma_mask = DMA_32BIT_MASK,
		.release = dev_release,
	}
};

static struct device *dev = &pseudo_dev.dev;

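/* Convert between the virtual and DMA address of a descriptor in the
 * coherent crypt_ctl array; the NPE queues carry physical addresses only.
 */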
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

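/* Allocate the descriptor array shared with the NPE on first use.  It
 * must cover all NPE_QLEN_TOTAL entries, since get_crypt_desc_emerg()
 * hands out the descriptors from NPE_QLEN upwards, and it may be
 * called from atomic context (under desc_lock), hence GFP_ATOMIC.
 */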
static int setup_crypt_desc(void)
{
	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	crypt_virt = dma_alloc_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
	return 0;
}

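/* Descriptor allocators.  get_crypt_desc() hands out the next of the
 * first NPE_QLEN entries under desc_lock, failing if that slot is
 * still in flight; get_crypt_desc_emerg() falls back to the region
 * reserved for key-setup requests (NPE_QLEN..NPE_QLEN_TOTAL-1).
 */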
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

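/* Free a whole buffer_desc chain, following the virtual links while
 * handing the matching dma_pool handles back to the pool.
 */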
static void free_buf_chain(struct buffer_desc *buf, u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

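/* The ICV was bounced through a pool buffer because it is scattered in
 * the request's scatterlist.  On encryption, copy it back to the end
 * of the payload before the bounce buffer is returned to the pool.
 */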
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->src, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

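/* Completion handler for one queue entry.  The NPE returns the
 * descriptor's physical address with status in the low bits; bit 0
 * apparently flags a failed authentication check, mapped to -EBADMSG
 * here.  Dispatch on the host-side ctl_flags to unmap buffers and
 * complete the matching request.
 */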
static void one_packet(dma_addr_t phys)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);
		dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
				DMA_TO_DEVICE);
		dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
		dma_unmap_sg(dev, req->src, req_ctx->src_nents,
				DMA_BIDIRECTIONAL);

		free_buf_chain(req_ctx->buffer, crypt->src_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
		int nents;
		if (req_ctx->dst) {
			nents = req_ctx->dst_nents;
			dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
			free_buf_chain(req_ctx->dst, crypt->dst_buf);
			src_direction = DMA_TO_DEVICE;
		}
		nents = req_ctx->src_nents;
		dma_unmap_sg(dev, req->src, nents, src_direction);
		free_buf_chain(req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(void)
{
	int ret = -ENODEV;

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
			IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		npe_load_firmware(npe_c, npe_name(npe_c), dev);
	}

	/* buffer_pool entries are sometimes also used to store the hmac,
	 * so make sure they are large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;
err:
	if (ctx_pool)
		dma_pool_destroy(ctx_pool);
	if (buffer_pool)
		dma_pool_destroy(buffer_pool);
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(void)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
}

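/* Each transform keeps one NPE parameter block per direction in
 * DMA-able ctx_pool memory; npe_ctx_idx tracks how much of it has
 * been filled in by setup_cipher()/setup_auth().
 */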
static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
	return init_tfm(tfm);
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

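/* Offload one half of the HMAC precomputation: hash a single block of
 * key XOR ipad/opad and let the NPE store the resulting chaining
 * variable at `target` (the inner or outer digest slot in the
 * context).  Completion is signalled asynchronously via
 * CTL_FLAG_GEN_ICV.
 */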
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

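/* Lay out the hash part of the NPE context: the config word followed
 * by the algorithm's initial chaining values.  itarget/otarget are the
 * physical slots the precomputed inner/outer HMAC digests will be
 * written back to by register_chain_var().
 */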
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}

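/* AES decryption requires the reverse (decryption) key schedule.  Ask
 * the NPE to derive it: temporarily flag the context CIPH_ENCR, run
 * NPE_OP_ENC_GEN_KEY over one block, and let it deposit the reverse
 * schedule at icv_rev_aes.  The CTL_FLAG_GEN_REVAES completion handler
 * clears CIPH_ENCR again.
 */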
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

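/* Fill in the cipher part of the NPE context for one direction: the
 * config word (algorithm, mode, key length) followed by the key,
 * which is zero-padded to DES3_EDE_KEY_SIZE for single DES.
 */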
static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
		case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
		case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		const u32 *K = (const u32 *)key;
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
		{
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;
	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}

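/* Translate a scatterlist into a chain of NPE buffer descriptors.
 * `buf` is the pre-allocated head; physically adjacent sg entries are
 * merged into one descriptor.  Returns the tail of the chain, or NULL
 * if a pool allocation fails (the partial chain stays linked for the
 * caller to free).
 */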
static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
		unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
{
	int nents = 0;

	while (nbytes > 0) {
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		unsigned len = min(nbytes, sg_dma_len(sg));

		nents++;
		nbytes -= len;
		if (!buf->phys_addr) {
			buf->phys_addr = sg_dma_address(sg);
			buf->buf_len = len;
			buf->next = NULL;
			buf->phys_next = 0;
			goto next;
		}
		/* Two consecutive chunks on one page may be handled by the old
		 * buffer descriptor, increased by the length of the new one
		 */
		if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
			buf->buf_len += len;
			goto next;
		}
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf)
			return NULL;
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;

		buf = next_buf;
		buf->next = NULL;
		buf->phys_next = 0;
		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
next:
		if (nbytes > 0) {
			sg = sg_next(sg);
		}
	}
	return buf;
}

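/* Key setup runs asynchronously on the NPE: each register_chain_var()
 * or gen_rev_aes_key() bumps ctx->configuring, and the completion
 * handler signals ctx->completion when the last one finishes, so we
 * wait here before declaring the key installed.
 */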
static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

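/* Build and enqueue one cipher descriptor.  Returns -EINPROGRESS on
 * success (completion arrives via one_packet()) and -EAGAIN when the
 * send queue is full or a key change is still in flight.
 */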
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	int ret = -ENOMEM;
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes, nents;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return ret;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		nents = count_sg(req->dst, nbytes);
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		BUG_ON(nents != 1);
		req_ctx->dst_nents = nents;
		dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
		req_ctx->dst = dma_pool_alloc(buffer_pool, flags, &crypt->dst_buf);
		if (!req_ctx->dst)
			goto unmap_sg_dest;
		req_ctx->dst->phys_addr = 0;
		if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
	} else {
		req_ctx->dst = NULL;
		req_ctx->dst_nents = 0;
	}
	nents = count_sg(req->src, nbytes);
	req_ctx->src_nents = nents;
	dma_map_sg(dev, req->src, nents, src_direction);

	req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
	if (!req_ctx->src)
		goto unmap_sg_src;
	req_ctx->src->phys_addr = 0;
	if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
		goto free_buf_src;

	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(req_ctx->src, crypt->src_buf);
unmap_sg_src:
	dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(req_ctx->dst, crypt->dst_buf);
unmap_sg_dest:
		dma_unmap_sg(dev, req->dst, req_ctx->dst_nents,
				DMA_FROM_DEVICE);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return ret;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

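/* RFC 3686 CTR: build the counter block as nonce | IV | counter, with
 * the counter starting at 1.  Encrypt and decrypt both take the
 * encrypt path, since CTR mode is its own inverse.
 */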
static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}

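/* Return nonzero if the nbytes starting at offset `start` are not
 * contained in a single scatterlist entry, i.e. the ICV cannot be
 * addressed as one contiguous physical region.
 */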
static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
		unsigned int nbytes)
{
	int offset = 0;

	if (!nbytes)
		return 0;

	for (;;) {
		if (start < offset + sg->length)
			break;

		offset += sg->length;
		sg = sg_next(sg);
	}
	return (start + nbytes > offset + sg->length);
}

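/* AEAD path: chain assoc data, IV and payload into one buffer_desc
 * list so the NPE can authenticate assoc | IV | text in a single pass
 * while en-/decrypting only the payload region.  A scattered ICV is
 * bounced through a pool buffer (see hmac_inconsistent()).
 */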
static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	int ret = -ENOMEM;
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen, nents;
	struct buffer_desc *buf;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return ret;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + ivsize + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	if (req->src != req->dst) {
		BUG(); /* -ENOTSUP because of my laziness */
	}

	req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
	if (!req_ctx->buffer)
		goto out;
	req_ctx->buffer->phys_addr = 0;
	/* ASSOC data */
	nents = count_sg(req->assoc, req->assoclen);
	req_ctx->assoc_nents = nents;
	dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
	buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer, flags);
	if (!buf)
		goto unmap_sg_assoc;
	/* IV */
	sg_init_table(&req_ctx->ivlist, 1);
	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
	dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
	buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
	if (!buf)
		goto unmap_sg_iv;
	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto unmap_sg_iv;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}
	/* Crypt */
	nents = count_sg(req->src, cryptlen + authsize);
	req_ctx->src_nents = nents;
	dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
	buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
	if (!buf)
		goto unmap_sg_src;
	if (!req_ctx->hmac_virt) {
		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
	}
	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;
unmap_sg_src:
	dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
	if (req_ctx->hmac_virt) {
		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
				crypt->icv_rev_aes);
	}
unmap_sg_iv:
	dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
unmap_sg_assoc:
	dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
	free_buf_chain(req_ctx->buffer, crypt->src_buf);
out:
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return ret;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
		unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enckey_len = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enckey_len)
		goto badkey;

	ctx->authkey_len = keylen - ctx->enckey_len;
	memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
	memcpy(ctx->authkey, key, ctx->authkey_len);

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	ctx->enckey_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 1, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 0, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned len, ivsize = crypto_aead_ivsize(tfm);
	__be64 seq;

	/* copied from eseqiv.c */
	if (!ctx->salted) {
		get_random_bytes(ctx->salt, ivsize);
		ctx->salted = 1;
	}
	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);
	return aead_perform(&req->areq, 1, req->areq.assoclen,
			req->areq.cryptlen + ivsize, req->giv);
}

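/* Algorithm templates.  cfg_enc/cfg_dec are the NPE config words for
 * each direction; entries with a .hash are registered as authenc()
 * AEADs, the rest as async block ciphers (see ixp_module_init()).
 */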
static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto = {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto = {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto = {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto = {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(md5),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(sha1),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(md5),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"
static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	if (platform_device_register(&pseudo_dev))
		return -ENODEV;

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto();
	if (err) {
		platform_device_unregister(&pseudo_dev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->cra_name) >=
			CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}
		if (!ixp4xx_algos[i].hash) {
			/* block ciphers */
			cra->cra_type = &crypto_ablkcipher_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					 CRYPTO_ALG_ASYNC;
			if (!cra->cra_ablkcipher.setkey)
				cra->cra_ablkcipher.setkey = ablk_setkey;
			if (!cra->cra_ablkcipher.encrypt)
				cra->cra_ablkcipher.encrypt = ablk_encrypt;
			if (!cra->cra_ablkcipher.decrypt)
				cra->cra_ablkcipher.decrypt = ablk_decrypt;
			cra->cra_init = init_tfm_ablk;
		} else {
			/* authenc */
			cra->cra_type = &crypto_aead_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
					 CRYPTO_ALG_ASYNC;
			cra->cra_aead.setkey = aead_setkey;
			cra->cra_aead.setauthsize = aead_setauthsize;
			cra->cra_aead.encrypt = aead_encrypt;
			cra->cra_aead.decrypt = aead_decrypt;
			cra->cra_aead.givencrypt = aead_givencrypt;
			cra->cra_init = init_tfm_aead;
		}
		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto();
	platform_device_unregister(&pseudo_dev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");