crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN
drivers/crypto/marvell/cipher.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/internal/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}

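/*
 * Standard (non-TDMA) path: the CPU copies the operation descriptor and up
 * to CESA_SA_SRAM_PAYLOAD_SIZE bytes of input data into the engine SRAM,
 * then enables the ACCL0 completion interrupt and starts the accelerator.
 */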
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}

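/*
 * Once the engine is done, propagate the output IV back into the request:
 * from the last operation descriptor of the TDMA chain in DMA mode, or
 * straight from the engine SRAM in standard mode.
 */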
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};

static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}

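/*
 * Expand the AES key with the generic helper, then fill in the extra
 * decryption round-key words (only needed for 192/256-bit keys) in the
 * little-endian layout the engine expects.
 */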
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = aes_expandkey(&ctx->aes, key, len);
	if (ret)
		return ret;

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

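/*
 * TDMA path: map the source and destination scatterlists, then build a
 * chain of TDMA descriptors that, for each chunk, copies the operation
 * context and input data into SRAM, launches the engine, and copies the
 * result back out. A final descriptor fetches the output IV.
 */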
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);

	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}

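/*
 * Common request path shared by all modes: initialise the request, pick an
 * engine based on its current load, queue the request, and clean up if it
 * could not be queued.
 */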
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}

static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

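/*
 * The skcipher_alg descriptors below are deliberately non-static: they are
 * picked up and registered with the crypto API by the CESA core driver.
 */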
struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};