/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"
struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;

	u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
};
struct safexcel_ahash_req {
	bool last_req;
	bool finish;
	bool hmac;
	bool needs_inv;

	int nents;
	dma_addr_t result_dma;

	u32 digest;

	u8 state_sz;    /* expected state size, only set once */
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

	u64 len;
	u64 processed;

	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
	dma_addr_t cache_dma;
	unsigned int cache_sz;

	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};
struct safexcel_ahash_export_state {
	u64 len;
	u64 processed;

	u32 digest;

	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
	u8 cache[SHA256_BLOCK_SIZE];
};
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;
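
	/*
	 * The token is a small two-instruction program for the engine:
	 * the first instruction directs the packet data into the hash
	 * unit, the second inserts the resulting digest into the result.
	 */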
	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize,
				     unsigned int blocksize)
{
	int i;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= req->digest;
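
	/*
	 * The context size is given in 32-bit words: the internal hash
	 * state plus one word for the block count, i.e. 5 + 1 words for
	 * SHA1 and 8 + 1 for SHA224/SHA256.
	 */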
	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (req->processed) {
			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
		} else {
			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
		}

		if (!req->finish)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

		/*
		 * Copy the input digest if needed, and setup the context
		 * fields. Do this now as we need it to setup the first command
		 * descriptor.
		 */
		if (req->processed) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			if (req->finish)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
		}
	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));

		memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
		memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
		       ctx->opad, req->state_sz);
	}
}
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	int cache_len;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (sreq->nents) {
		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
		sreq->nents = 0;
	}

	if (sreq->result_dma) {
		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
				 DMA_FROM_DEVICE);
		sreq->result_dma = 0;
	}

	if (sreq->cache_dma) {
		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
				 DMA_TO_DEVICE);
		sreq->cache_dma = 0;
	}

	if (sreq->finish)
		memcpy(areq->result, sreq->state,
		       crypto_ahash_digestsize(ahash));
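
	/*
	 * Bytes accepted by update() past the last block sent were stashed
	 * in cache_next by send(); carry them into the main cache so the
	 * next request starts from them.
	 */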
	cache_len = sreq->len - sreq->processed;
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
				   struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
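
	/*
	 * queued: bytes accepted by update() but not yet pushed to the
	 * engine; cache_len: the leading part of those bytes that already
	 * sits in req->cache from a previous call.
	 */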
	queued = len = req->len - req->processed;
	if (queued <= crypto_ahash_blocksize(ahash))
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	if (!req->last_req) {
		/* If this is not the last request and the queued data does not
		 * fit into full blocks, cache it for the next send() call.
		 */
		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
		if (!extra)
			/* If this is not the last request and the queued data
			 * is a multiple of a block, cache the last one for now.
			 */
			extra = crypto_ahash_blocksize(ahash);

		if (extra) {
			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req->cache_next, extra,
					   areq->nbytes - extra);

			queued -= extra;
			len -= extra;

			if (!queued) {
				*commands = 0;
				*results = 0;
				return 0;
			}
		}
	}

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		req->cache_dma = dma_map_single(priv->dev, req->cache,
						cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, req->cache_dma)) {
			spin_unlock_bh(&priv->ring[ring].egress_lock);
			return -EINVAL;
		}

		req->cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 req->cache_dma, cache_len, len,
						 ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}
		n_cdesc++;

		queued -= cache_len;
		if (!queued)
			goto send_command;
	}

	/* Now handle the current ahash request buffer(s) */
	req->nents = dma_map_sg(priv->dev, areq->src,
				sg_nents_for_len(areq->src, areq->nbytes),
				DMA_TO_DEVICE);
	if (!req->nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, req->nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - sglen < 0)
			sglen = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen), sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto unmap_sg;
		}
		n_cdesc++;

		if (n_cdesc == 1)
			first_cdesc = cdesc;

		queued -= sglen;
		if (!queued)
			break;
	}

send_command:
	/* Setup the context options */
	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
				 crypto_ahash_blocksize(ahash));

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->state_sz);

	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, req->result_dma)) {
		ret = -EINVAL;
		goto unmap_sg;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
				   req->state_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto unmap_result;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	req->processed += len;
	request->req = &areq->base;

	*commands = n_cdesc;
	*results = 1;
	return 0;

unmap_result:
	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
			 DMA_FROM_DEVICE);
unmap_sg:
	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (req->cache_dma) {
		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
				 DMA_TO_DEVICE);
		req->cache_sz = 0;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}
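
/*
 * The context record is shared with the engine; if its contents no longer
 * match the state this request expects, it must be invalidated before it
 * can be reused.
 */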
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);
	int i;

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
			return true;

	if (ctx->base.ctxr->data[state_w_sz] !=
	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
		return true;

	return false;
}
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: invalidate: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		*ret = -EINVAL;
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return 1;
}
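
/*
 * A single result handler dispatches to the invalidation path or to the
 * regular completion path, depending on what the request asked for in
 * send().
 */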
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int err;

	BUG_ON(priv->version == EIP97 && req->needs_inv);

	if (req->needs_inv) {
		req->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	int ret;

	ret = safexcel_invalidate_cache(async, ctx->priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}
static int safexcel_ahash_send(struct crypto_async_request *async,
			       int ring, struct safexcel_request *request,
			       int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	if (req->needs_inv)
		ret = safexcel_ahash_send_inv(async, ring, request,
					      commands, results);
	else
		ret = safexcel_ahash_send_req(async, ring, request,
					      commands, results);

	return ret;
}
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	rctx->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}
/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, i.e. when there is at least one block size worth of data in
 * the pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	int queued, cache_len;

	/* cache_len: everything accepted by the driver but not sent yet,
	 * tot sz handled by update() - last req sz - tot sz handled by send()
	 */
	cache_len = req->len - areq->nbytes - req->processed;

	/* queued: everything accepted by the driver which will be handled by
	 * the next send() calls.
	 * tot sz handled by update() - tot sz handled by send()
	 */
	queued = req->len - req->processed;

	/*
	 * If there aren't enough bytes to proceed (less than a block
	 * size), cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return areq->nbytes;
	}

	/* We couldn't cache all the data */
	return 0;
}
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (ctx->base.ctxr) {
		if (priv->version == EIP197 &&
		    !ctx->base.needs_inv && req->processed &&
		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
			/* We're still setting needs_inv here, even though it is
			 * cleared right away, because the needs_inv flag can be
			 * set in other functions and we want to keep the same
			 * behaviour.
			 */
			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}
static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	req->len += areq->nbytes;

	safexcel_ahash_cache(areq);

	/*
	 * We're not doing partial updates when performing an HMAC request.
	 * Everything will be handled by the final() call.
	 */
	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
		return 0;

	if (req->hmac)
		return safexcel_ahash_enqueue(areq);
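
	/*
	 * Only kick the engine once more than a block is queued: send()
	 * always keeps the last block back, so final() still has data to
	 * finish the hash with.
	 */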
	if (!req->last_req &&
	    req->len - req->processed > crypto_ahash_blocksize(ahash))
		return safexcel_ahash_enqueue(areq);

	return 0;
}
static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->last_req = true;
	req->finish = true;

	/* If we have an overall 0 length request */
	if (!(req->len + areq->nbytes)) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);

		return 0;
	}

	return safexcel_ahash_enqueue(areq);
}
static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->last_req = true;
	req->finish = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
}
static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	export->digest = req->digest;

	memcpy(export->state, req->state, req->state_sz);
	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

	return 0;
}
static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	req->digest = export->digest;

	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}
static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_ahash_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
	return 0;
}
static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA1_H0;
	req->state[1] = SHA1_H1;
	req->state[2] = SHA1_H2;
	req->state[3] = SHA1_H3;
	req->state[4] = SHA1_H4;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;

	return 0;
}
static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	if (priv->version == EIP197) {
		ret = safexcel_ahash_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}
struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha1_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_ahash_result {
	struct completion completion;
	int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Avoid leaking */
		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}
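
	/*
	 * Pad the (possibly hashed) key to a full block, then derive the
	 * inner and outer pads: ipad = K ^ 0x36.., opad = K ^ 0x5c.., as
	 * in HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
	 */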
	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}
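
/*
 * Compute the HMAC inner and outer intermediate states for a key: hash one
 * padded block on a separate ahash tfm and export the resulting midstate,
 * which the engine can later resume from.
 */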
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
				unsigned int keylen, void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen, const char *alg,
				    unsigned int state_sz)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_ahash_export_state istate, ostate;
	int ret, i;

	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	if (priv->version == EIP197 && ctx->base.ctxr) {
		for (i = 0; i < state_sz / sizeof(u32); i++) {
			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	memcpy(ctx->ipad, &istate.state, state_sz);
	memcpy(ctx->opad, &ostate.state, state_sz);

	return 0;
}
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
					SHA1_DIGEST_SIZE);
}
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA256_H0;
	req->state[1] = SHA256_H1;
	req->state[2] = SHA256_H2;
	req->state[3] = SHA256_H3;
	req->state[4] = SHA256_H4;
	req->state[5] = SHA256_H5;
	req->state[6] = SHA256_H6;
	req->state[7] = SHA256_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA224_H0;
	req->state[1] = SHA224_H1;
	req->state[2] = SHA224_H2;
	req->state[3] = SHA224_H3;
	req->state[4] = SHA224_H4;
	req->state[5] = SHA224_H5;
	req->state[6] = SHA224_H6;
	req->state[7] = SHA224_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	/* SHA224 uses the full SHA256 internal state */
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha224_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha224_digest,
		.setkey = safexcel_hmac_sha224_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "safexcel-hmac-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha256_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}
struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha256_digest,
		.setkey = safexcel_hmac_sha256_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "safexcel-hmac-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};