/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

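/* The cipher config word handed to the NPE packs the direction bit
 * (CIPH_ENCR/CIPH_DECR), the algorithm and key-size selector (MOD_DES,
 * MOD_3DES, MOD_AES*) and the chaining mode (MOD_ECB/MOD_CTR/MOD_CBC_*)
 * from the definitions above into one 32-bit value; see setup_cipher().
 */
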
#define MAX_IVLEN 16
#define NPE_ID   2  /* NPE C */
#define NPE_QLEN 16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID 29
#define RECV_QID 30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_IPAD_VALUE   0x36
#define HMAC_OPAD_VALUE   0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

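/* buffer_desc is shared with the NPE: the leading fields up to
 * __reserved are read by the engine through phys_addr/phys_next chains,
 * while the trailing fields (next, dir) are host-only bookkeeping for
 * unmapping and freeing.  The #ifdef swaps the 16-bit halves so that
 * the big-endian NPE sees the same layout on either host endianness.
 */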
struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_* operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

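/* All crypt_ctl descriptors live in one coherent DMA block, so
 * translating between the CPU pointer and the bus address handed to
 * the queue manager is plain pointer arithmetic over that block.
 */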
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* get_crypt_desc_emerg() indexes descriptors up to NPE_QLEN_TOTAL,
	 * so the pool must cover all of them; this also matches the size
	 * freed in release_ixp_crypto(). */
	crypt_virt = dma_alloc_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
	return 0;
}

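/* Descriptor allocation is a simple ring scan: the first NPE_QLEN
 * entries serve normal requests, while the window above them (up to
 * NPE_QLEN_TOTAL) is reserved for key/ICV "registration" descriptors,
 * so configuration can proceed even when the normal ring is full.
 */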
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		/* unmap the data buffer itself, not the link to the
		 * next descriptor */
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->dst, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

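/* Completion path: the queue manager hands back the physical address
 * of the finished crypt_ctl.  The NPE apparently flags an
 * authentication failure in bit 0 of that address, so it is tested
 * before the low bits are masked off to recover the descriptor.
 */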
static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto err;	/* release the NPE on failure */
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
			npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
			npe_name(npe_c));
		ret = -ENODEV;
		goto err;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	if (ctx_pool)
		dma_pool_destroy(ctx_pool);
	if (buffer_pool)
		dma_pool_destroy(buffer_pool);
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
	return;
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}

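/* Precompute the HMAC inner/outer pad state: the key is padded, XORed
 * with the ipad/opad byte and pushed through the NPE's hash unit once;
 * the intermediate digest is written back into the crypto context at
 * 'target' so per-packet hashing can start from the precomputed state.
 */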
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = 0;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}

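/* AES decryption needs the "reverse" (equivalent inverse) key schedule.
 * The NPE derives it itself: NPE_OP_ENC_GEN_KEY runs a dummy encryption
 * and deposits the reverse key at icv_rev_aes; the temporarily-set
 * CIPH_ENCR bit is cleared again in the completion handler.
 */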
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		const u32 *K = (const u32 *)key;
		/* reject degenerate 3DES keys with K1 == K2 or K2 == K3 */
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5])))) {
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

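/* Walk a scatterlist and build the NPE buffer-descriptor chain for it.
 * The caller passes a throwaway "hook" descriptor on its stack; only
 * hook.next/hook.phys_next are consumed, seeding the real chain.
 * Returns the last descriptor written, or NULL if a pool allocation
 * failed (the chain built so far stays properly terminated).
 */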
static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = page_address(sg_page(sg)) + sg->offset;
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			/* terminate the chain built so far before
			 * signalling failure, so the caller can free it */
			buf->next = NULL;
			buf->phys_next = 0;
			return NULL;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

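/* Submit one ablkcipher request to the NPE: grab a descriptor, chain
 * the destination (if not in-place) and source scatterlists into NPE
 * buffer descriptors, then push the descriptor's physical address onto
 * SEND_QID.  Completion is reported asynchronously through RECV_QID
 * and the crypto_done tasklet.
 */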
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
					flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
				flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}

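/* AEAD requests hash over assoclen + cryptlen and en/decrypt the tail
 * of that region.  If the ICV lands on a single buffer descriptor the
 * NPE reads/writes it in place; otherwise the scattered bytes are
 * bounced through a small hmac_virt buffer that finish_scattered_hmac()
 * copies back (or verifies from) on completion.
 */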
static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	if (!encrypt || !req_ctx->dst) {
		lastlen = buf->buf_len;
		if (lastlen >= authsize)
			crypt->icv_rev_aes = buf->phys_addr +
					     buf->buf_len - authsize;
	}

	if (unlikely(lastlen < authsize)) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_src;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto	= {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name		= DRIVER_NAME,
	.id		= 0,
	.dma_mask	= DMA_BIT_MASK(32),
};

static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->cra_name) >=
			CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}

		/* block ciphers */
		cra->cra_type = &crypto_ablkcipher_type;
		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				 CRYPTO_ALG_KERN_DRIVER_ONLY |
				 CRYPTO_ALG_ASYNC;
		if (!cra->cra_ablkcipher.setkey)
			cra->cra_ablkcipher.setkey = ablk_setkey;
		if (!cra->cra_ablkcipher.encrypt)
			cra->cra_ablkcipher.encrypt = ablk_encrypt;
		if (!cra->cra_ablkcipher.decrypt)
			cra->cra_ablkcipher.decrypt = ablk_decrypt;
		cra->cra_init = init_tfm_ablk;

		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		/* check this entry's own cfg word, not ixp4xx_algos[i]'s */
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC;
		cra->setkey = aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");