/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_ablkcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

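/*
 * Iterator used on the DMA path to walk the source and destination
 * scatterlists one engine operation (at most one SRAM payload) at a time.
 */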
static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
				 struct ablkcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->req.dma);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_cleanup(req);
}

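/*
 * Standard (PIO) path: copy the next chunk of payload into the engine
 * SRAM, update the operation length and start the accelerator. The full
 * operation context is only written to SRAM for the first chunk; later
 * chunks only rewrite the descriptor (see the skip_ctx logic below).
 */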
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len = min_t(size_t, req->nbytes - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

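/*
 * Copy the chunk processed by the engine back from SRAM and advance the
 * request offset. Returns -EINPROGRESS as long as data remains.
 */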
static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
					  u32 status)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->nbytes)
		return -EINPROGRESS;

	return 0;
}

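/*
 * Common completion handler. Once the underlying DMA or standard request
 * is done, the updated IV is copied back from SRAM into req->info, as
 * expected by the crypto API for chaining modes.
 */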
static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
				      u32 status)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	int ret;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma, status);
	else
		ret = mv_cesa_ablkcipher_std_process(ablkreq, status);

	if (ret)
		return ret;

	memcpy_fromio(ablkreq->info,
		      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
		      crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));

	return 0;
}

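/* Start (or resume) the request on whichever backend it was built for. */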
static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->req.dma);
	else
		mv_cesa_ablkcipher_std_step(ablkreq);
}

static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->size = 0;
	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
					      struct mv_cesa_engine *engine)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_prepare(ablkreq);
	else
		mv_cesa_ablkcipher_std_prepare(ablkreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_cleanup(ablkreq);
}

static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
	.step = mv_cesa_ablkcipher_step,
	.process = mv_cesa_ablkcipher_process,
	.prepare = mv_cesa_ablkcipher_prepare,
	.cleanup = mv_cesa_ablkcipher_req_cleanup,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;

	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

	return 0;
}

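/*
 * Expand the AES key and build the decryption key layout expected by the
 * engine: crypto_aes_expand_key() puts the last round key in
 * key_dec[0..3], and the loop below appends the preceding words of the
 * encryption schedule for 192/256-bit keys. The hardware apparently
 * derives the remaining decryption round keys from this tail by walking
 * the key schedule backwards.
 */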
static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

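/*
 * DES keys are passed raw to the engine; only validate the length and
 * honour the weak-key check when CRYPTO_TFM_REQ_WEAK_KEY is set
 * (des_ekey() returns 0 for weak keys).
 */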
static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

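/*
 * DMA path setup: map the scatterlists, then build a TDMA descriptor
 * chain that, for each SRAM-sized chunk, copies the input into SRAM,
 * launches the crypto operation and copies the result back out. Only the
 * first operation descriptor embeds the full context (skip_ctx is set
 * for the following ones).
 */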
static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
					   const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_tdma_req *dreq = &creq->req.dma;
	struct mv_cesa_ablkcipher_dma_iter iter;
	struct mv_cesa_tdma_chain chain;
	bool skip_ctx = false;
	int ret;

	dreq->base.type = CESA_DMA_REQ;
	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&chain);
	mv_cesa_ablkcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

	dreq->chain = chain;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;

	sreq->base.type = CESA_STD_REQ;
	sreq->op = *op_templ;
	sreq->skip_ctx = false;

	return 0;
}

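/*
 * Common request setup: reject lengths that are not block-aligned, count
 * the scatterlist entries, force a crypt-only operation, and pick the
 * DMA path whenever the engine supports TDMA.
 */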
static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->nbytes, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	/* TODO: add a threshold for DMA usage */
	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);

	return ret;
}

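/*
 * Each cipher handler below fills in a mv_cesa_op_ctx template (cipher,
 * key, chaining mode, direction) and hands it to the common request
 * path. Cleanup is skipped while the request is still queued on the
 * engine (see mv_cesa_req_needs_cleanup()).
 */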
static int mv_cesa_des_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret;

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des_alg = {
	.cra_name = "ecb(des)",
	.cra_driver_name = "mv-ecb-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_ecb_des_encrypt,
			.decrypt = mv_cesa_ecb_des_decrypt,
		},
	},
};

static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des_alg = {
	.cra_name = "cbc(des)",
	.cra_driver_name = "mv-cbc-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_cbc_des_encrypt,
			.decrypt = mv_cesa_cbc_des_decrypt,
		},
	},
};

static int mv_cesa_des3_op(struct ablkcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret;

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
	.cra_name = "ecb(des3_ede)",
	.cra_driver_name = "mv-ecb-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_ecb_des3_ede_encrypt,
			.decrypt = mv_cesa_ecb_des3_ede_decrypt,
		},
	},
};

static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "mv-cbc-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_cbc_des3_ede_encrypt,
			.decrypt = mv_cesa_cbc_des3_ede_decrypt,
		},
	},
};

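/*
 * AES: select the encryption or decryption key schedule depending on the
 * direction encoded in the template, and encode the key length in the
 * config word (128-bit is the default).
 */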
static int mv_cesa_aes_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret, i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_ecb_aes_encrypt,
			.decrypt = mv_cesa_ecb_aes_decrypt,
		},
	},
};

static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_cbc_aes_encrypt,
			.decrypt = mv_cesa_cbc_aes_decrypt,
		},
	},
};