/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

enum safexcel_cipher_direction {
	SAFEXCEL_ENCRYPT,
	SAFEXCEL_DECRYPT,
};

struct safexcel_cipher_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 mode;

	__le32 key[8];
	unsigned int key_len;
};

struct safexcel_cipher_req {
	enum safexcel_cipher_direction direction;
	bool needs_inv;
};
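
/*
 * Build the engine token for one request. For CBC the IV is copied into
 * the token area first and the command descriptor is flagged so the
 * engine fetches the IV from the token words.
 */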
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
				  struct crypto_async_request *async,
				  struct safexcel_command_desc *cdesc,
				  u32 length)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_token *token;
	unsigned offset = 0;

	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
		offset = AES_BLOCK_SIZE / sizeof(u32);
		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
	}

	token = (struct safexcel_token *)(cdesc->control_data.token + offset);

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
			EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_LAST |
				EIP197_TOKEN_INS_TYPE_CRYTO |
				EIP197_TOKEN_INS_TYPE_OUTPUT;
}
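
/*
 * Expand and store the AES key. On EIP197, if a context record is
 * already live on the device and the key changed, mark the context so
 * the cached record is invalidated before reuse.
 */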
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
			       unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct crypto_aes_ctx aes;
	int ret, i;

	ret = crypto_aes_expand_key(&aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	if (priv->version == EIP197 && ctx->base.ctxr_dma) {
		for (i = 0; i < len / sizeof(u32); i++) {
			if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	for (i = 0; i < len / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = len;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
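
/*
 * Fill in the per-request control words: processing direction, cipher
 * mode and AES key size. ctrl_size is the context record size,
 * presumably in 32-bit words (4/6/8 for AES-128/192/256).
 */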
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
				    struct crypto_async_request *async,
				    struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int ctrl_size;

	if (sreq->direction == SAFEXCEL_ENCRYPT)
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
	else
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
	cdesc->control_data.control1 |= ctx->mode;

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
		ctrl_size = 4;
		break;
	case AES_KEYSIZE_192:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
		ctrl_size = 6;
		break;
	case AES_KEYSIZE_256:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
		ctrl_size = 8;
		break;
	default:
		dev_err(priv->dev, "aes keysize not supported: %u\n",
			ctx->key_len);
		return -EINVAL;
	}

	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

	return 0;
}
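
/*
 * Completion handler for a regular cipher request: consume the result
 * descriptors, report any engine error and unmap the DMA scatterlists.
 */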
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: result: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev,
				"cipher: result: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	*should_complete = true;

	return ndesc;
}
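
/*
 * Push one cipher request to the engine: map src/dst, then emit one
 * command descriptor per source segment and one result descriptor per
 * destination segment. On failure, roll back the ring write pointers
 * and unmap everything.
 */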
static int safexcel_aes_send(struct crypto_async_request *async,
			     int ring, struct safexcel_request *request,
			     int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
	int i, ret = 0;

	if (req->src == req->dst) {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_BIDIRECTIONAL);
		nr_dst = nr_src;
		if (!nr_src)
			return -EINVAL;
	} else {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_TO_DEVICE);
		if (!nr_src)
			return -EINVAL;

		nr_dst = dma_map_sg(priv->dev, req->dst,
				    sg_nents_for_len(req->dst, req->cryptlen),
				    DMA_FROM_DEVICE);
		if (!nr_dst) {
			dma_unmap_sg(priv->dev, req->src,
				     sg_nents_for_len(req->src, req->cryptlen),
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* command descriptors */
	for_each_sg(req->src, sg, nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
					   sg_dma_address(sg), len, req->cryptlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1) {
			safexcel_context_control(ctx, async, cdesc);
			safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
		}

		queued -= len;
		if (!queued)
			break;
	}

	/* result descriptors */
	for_each_sg(req->dst, sg, nr_dst, i) {
		bool first = !i, last = (i == nr_dst - 1);
		u32 len = sg_dma_len(sg);

		rdesc = safexcel_add_rdesc(priv, ring, first, last,
					   sg_dma_address(sg), len);
		if (IS_ERR(rdesc)) {
			/* No space left in the result descriptor ring */
			ret = PTR_ERR(rdesc);
			goto rdesc_rollback;
		}
		n_rdesc++;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	request->req = &req->base;

	*commands = n_cdesc;
	*results = n_rdesc;
	return 0;

rdesc_rollback:
	for (i = 0; i < n_rdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	return ret;
}
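
/*
 * Completion handler for a context invalidation. On tfm teardown
 * (exit_inv) the context record is freed; otherwise the request that
 * triggered the invalidation is re-queued on a freshly selected ring.
 */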
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0, enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: invalidate: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;

		return ndesc;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return ndesc;
}
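
/* Dispatch to the invalidation or the regular result handler. */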
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	int err;

	if (sreq->needs_inv) {
		sreq->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}
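
/* Emit a context cache invalidation command (one cdesc/rdesc pair). */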
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
				    int ring, struct safexcel_request *request,
				    int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	ret = safexcel_invalidate_cache(async, priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}
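
/*
 * Send path entry point. Only the EIP197 caches context records, so an
 * invalidation request on an EIP97 would be a driver bug.
 */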
static int safexcel_send(struct crypto_async_request *async,
			 int ring, struct safexcel_request *request,
			 int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	BUG_ON(priv->version == EIP97 && sreq->needs_inv);

	if (sreq->needs_inv)
		ret = safexcel_cipher_send_inv(async, ring, request,
					       commands, results);
	else
		ret = safexcel_aes_send(async, ring, request,
					commands, results);

	return ret;
}
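
/*
 * Synchronously invalidate the context record: build an invalidation
 * request on the stack, queue it and wait for its completion callback.
 */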
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct skcipher_request));

	/* create invalidation request */
	init_completion(&result.completion);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      safexcel_inv_complete, &result);

	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	sreq->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev,
			 "cipher: sync: invalidate: completion error %d\n",
			 result.error);
		return result.error;
	}

	return 0;
}
static int safexcel_aes(struct skcipher_request *req,
			enum safexcel_cipher_direction dir, u32 mode)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	sreq->needs_inv = false;
	sreq->direction = dir;
	ctx->mode = mode;

	if (ctx->base.ctxr) {
		if (priv->version == EIP197 && ctx->base.needs_inv) {
			sreq->needs_inv = true;
			ctx->base.needs_inv = false;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(req->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}

static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}
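
/*
 * Per-tfm setup: wire up the send/handle_result callbacks and reserve
 * per-request space for struct safexcel_cipher_req.
 */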
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(tfm->__crt_alg, struct safexcel_alg_template,
			     alg.skcipher.base);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct safexcel_cipher_req));

	return 0;
}
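
/*
 * Per-tfm teardown: wipe the key material, then invalidate (EIP197) or
 * free (EIP97) the device context record.
 */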
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	memzero_explicit(ctx->key, 8 * sizeof(u32));

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

	if (priv->version == EIP197) {
		ret = safexcel_cipher_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}

struct safexcel_alg_template safexcel_alg_ecb_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_ecb_aes_encrypt,
		.decrypt = safexcel_ecb_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "safexcel-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};

static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

struct safexcel_alg_template safexcel_alg_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_cbc_aes_encrypt,
		.decrypt = safexcel_cbc_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "safexcel-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};