drivers/crypto/inside-secure/safexcel_hash.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        u32 alg;

        u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
        u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
        bool last_req;
        bool finish;
        bool hmac;
        bool needs_inv;
        bool hmac_zlen;
        bool len_is_le;

        int nents;
        dma_addr_t result_dma;

        u32 digest;

        u8 state_sz;    /* expected state size, only set once */
        u8 block_sz;    /* block size, only set once */
        u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

        u64 len;
        u64 processed;

        u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32));
        dma_addr_t cache_dma;
        unsigned int cache_sz;

        u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32));
};

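/*
 * Bookkeeping note: req->len counts all bytes accepted from update()
 * calls (plus the key^ipad block for HMAC), while req->processed counts
 * bytes already handed to the engine. The difference is what is still
 * queued in the driver: cached data plus any not-yet-sent request data.
 */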
static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
{
        return req->len - req->processed;
}

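/*
 * Build the two-instruction token controlling the engine for a hash
 * operation: a DIRECTION instruction hashing input_length bytes of
 * packet data, followed by an INSERT instruction writing result_length
 * bytes of digest back to the result buffer.
 */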
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
                                u32 input_length, u32 result_length)
{
        struct safexcel_token *token =
                (struct safexcel_token *)cdesc->control_data.token;

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = input_length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
        token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

        token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
        token[1].packet_length = result_length;
        token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
                        EIP197_TOKEN_STAT_LAST_PACKET;
        token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
                                EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}

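/*
 * Program the context control words for this command descriptor. Three
 * cases are handled below: the first (and possibly only) block of a
 * basic hash, which (re)starts the hash from scratch; a continuation
 * that finishes, which reloads the inner digest plus either a block
 * counter or the outer digest (HMAC); and a continuation that does not
 * finish yet, which reloads the digest and suppresses finalization.
 */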
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                                     struct safexcel_ahash_req *req,
                                     struct safexcel_command_desc *cdesc)
{
        struct safexcel_crypto_priv *priv = ctx->priv;
        u64 count = 0;

        cdesc->control_data.control0 |= ctx->alg;

        /*
         * Copy the input digest if needed, and set up the context
         * fields. Do this now as we need it to set up the first command
         * descriptor.
         */
        if (!req->processed) {
                /* First - and possibly only - block of basic hash only */
                if (req->finish) {
                        cdesc->control_data.control0 |=
                                CONTEXT_CONTROL_TYPE_HASH_OUT |
                                CONTEXT_CONTROL_RESTART_HASH  |
                                /* ensure it's not 0! */
                                CONTEXT_CONTROL_SIZE(1);
                } else {
                        cdesc->control_data.control0 |=
                                CONTEXT_CONTROL_TYPE_HASH_OUT  |
                                CONTEXT_CONTROL_RESTART_HASH   |
                                CONTEXT_CONTROL_NO_FINISH_HASH |
                                /* ensure it's not 0! */
                                CONTEXT_CONTROL_SIZE(1);
                }
                return;
        }

        /* Hash continuation or HMAC, set up (inner) digest from state */
        memcpy(ctx->base.ctxr->data, req->state, req->state_sz);

        if (req->finish) {
                /* Compute digest count for hash/HMAC finish operations */
                if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
                    req->hmac_zlen || (req->processed != req->block_sz)) {
                        count = req->processed / EIP197_COUNTER_BLOCK_SIZE;

                        /* This is a hardware limitation, as the
                         * counter must fit into a u32. This represents
                         * a fairly big amount of input data, so we
                         * shouldn't see this.
                         */
                        if (unlikely(count & 0xffffffff00000000ULL)) {
                                dev_warn(priv->dev,
                                         "Input data is too big\n");
                                return;
                        }
                }

                if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
                    /* Special case: zero length HMAC */
                    req->hmac_zlen ||
                    /* PE HW < 4.4 cannot do HMAC continue, fake using hash */
                    (req->processed != req->block_sz)) {
                        /* Basic hash continue operation, need digest + cnt */
                        cdesc->control_data.control0 |=
                                CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) |
                                CONTEXT_CONTROL_TYPE_HASH_OUT |
                                CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
                        /* For zero-len HMAC, don't finalize, already padded! */
                        if (req->hmac_zlen)
                                cdesc->control_data.control0 |=
                                        CONTEXT_CONTROL_NO_FINISH_HASH;
                        cdesc->control_data.control1 |=
                                CONTEXT_CONTROL_DIGEST_CNT;
                        ctx->base.ctxr->data[req->state_sz >> 2] =
                                cpu_to_le32(count);
                        req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;

                        /* Clear zero-length HMAC flag for next operation! */
                        req->hmac_zlen = false;
                } else { /* HMAC */
                        /* Need outer digest for HMAC finalization */
                        memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
                               ctx->opad, req->state_sz);

                        /* Single pass HMAC - no digest count */
                        cdesc->control_data.control0 |=
                                CONTEXT_CONTROL_SIZE(req->state_sz >> 1) |
                                CONTEXT_CONTROL_TYPE_HASH_OUT |
                                CONTEXT_CONTROL_DIGEST_HMAC;
                }
        } else { /* Hash continuation, do not finish yet */
                cdesc->control_data.control0 |=
                        CONTEXT_CONTROL_SIZE(req->state_sz >> 2) |
                        CONTEXT_CONTROL_DIGEST_PRECOMPUTED |
                        CONTEXT_CONTROL_TYPE_HASH_OUT |
                        CONTEXT_CONTROL_NO_FINISH_HASH;
        }
}

static int safexcel_ahash_enqueue(struct ahash_request *areq);

static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
        u64 cache_len;

        *ret = 0;

        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: result: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else {
                *ret = safexcel_rdesc_check_errors(priv, rdesc);
        }

        safexcel_complete(priv, ring);

        if (sreq->nents) {
                dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
                sreq->nents = 0;
        }

        if (sreq->result_dma) {
                dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
                                 DMA_FROM_DEVICE);
                sreq->result_dma = 0;
        }

        if (sreq->cache_dma) {
                dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
                                 DMA_TO_DEVICE);
                sreq->cache_dma = 0;
                sreq->cache_sz = 0;
        }

        if (sreq->finish) {
                if (sreq->hmac &&
                    (sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) {
                        /* Faking HMAC using hash - need to do outer hash */
                        memcpy(sreq->cache, sreq->state,
                               crypto_ahash_digestsize(ahash));

                        memcpy(sreq->state, ctx->opad, sreq->state_sz);

                        sreq->len = sreq->block_sz +
                                    crypto_ahash_digestsize(ahash);
                        sreq->processed = sreq->block_sz;
                        sreq->hmac = 0;

                        ctx->base.needs_inv = true;
                        areq->nbytes = 0;
                        safexcel_ahash_enqueue(areq);

                        *should_complete = false; /* Not done yet */
                        return 1;
                }

                memcpy(areq->result, sreq->state,
                       crypto_ahash_digestsize(ahash));
        }

        cache_len = safexcel_queued_len(sreq);
        if (cache_len)
                memcpy(sreq->cache, sreq->cache_next, cache_len);

        *should_complete = true;

        return 1;
}

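/*
 * Translate one (possibly partial) ahash request into engine
 * descriptors: at most one command descriptor for previously cached
 * data, up to one per scatterlist entry for the current request data,
 * and a single result descriptor receiving the (intermediate) state.
 */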
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int i, extra = 0, n_cdesc = 0, ret = 0;
        u64 queued, len, cache_len;

        queued = len = safexcel_queued_len(req);
        if (queued <= HASH_CACHE_SIZE)
                cache_len = queued;
        else
                cache_len = queued - areq->nbytes;

        if (!req->finish && !req->last_req) {
                /* If this is not the last request and the queued data does not
                 * fit into full cache blocks, cache it for the next send call.
                 */
                extra = queued & (HASH_CACHE_SIZE - 1);

                /* If this is not the last request and the queued data
                 * is a multiple of a block, cache the last one for now.
                 */
                if (!extra)
                        extra = HASH_CACHE_SIZE;

                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache_next, extra,
                                   areq->nbytes - extra);

                queued -= extra;
                len -= extra;

                if (!queued) {
                        *commands = 0;
                        *results = 0;
                        return 0;
                }
        }

        /* Add a command descriptor for the cached data, if any */
        if (cache_len) {
                req->cache_dma = dma_map_single(priv->dev, req->cache,
                                                cache_len, DMA_TO_DEVICE);
                if (dma_mapping_error(priv->dev, req->cache_dma))
                        return -EINVAL;

                req->cache_sz = cache_len;
                first_cdesc = safexcel_add_cdesc(priv, ring, 1,
                                                 (cache_len == len),
                                                 req->cache_dma, cache_len, len,
                                                 ctx->base.ctxr_dma);
                if (IS_ERR(first_cdesc)) {
                        ret = PTR_ERR(first_cdesc);
                        goto unmap_cache;
                }
                n_cdesc++;

                queued -= cache_len;
                if (!queued)
                        goto send_command;
        }

        /* Skip descriptor generation for zero-length requests */
        if (!areq->nbytes)
                goto send_command;

        /* Now handle the current ahash request buffer(s) */
        req->nents = dma_map_sg(priv->dev, areq->src,
                                sg_nents_for_len(areq->src,
                                                 areq->nbytes),
                                DMA_TO_DEVICE);
        if (!req->nents) {
                ret = -ENOMEM;
                goto cdesc_rollback;
        }

        for_each_sg(areq->src, sg, req->nents, i) {
                int sglen = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued < sglen)
                        sglen = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
                                           !(queued - sglen),
                                           sg_dma_address(sg),
                                           sglen, len, ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        ret = PTR_ERR(cdesc);
                        goto unmap_sg;
                }
                n_cdesc++;

                if (n_cdesc == 1)
                        first_cdesc = cdesc;

                queued -= sglen;
                if (!queued)
                        break;
        }

send_command:
        /* Set up the context options */
        safexcel_context_control(ctx, req, first_cdesc);

        /* Add the token */
        safexcel_hash_token(first_cdesc, len, req->state_sz);

        req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
                                         DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, req->result_dma)) {
                ret = -EINVAL;
                goto unmap_sg;
        }

        /* Add a result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
                                   req->state_sz);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto unmap_result;
        }

        safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);

        req->processed += len;

        *commands = n_cdesc;
        *results = 1;
        return 0;

unmap_result:
        dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
                         DMA_FROM_DEVICE);
unmap_sg:
        dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
        if (req->cache_dma) {
                dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
                                 DMA_TO_DEVICE);
                req->cache_dma = 0;
                req->cache_sz = 0;
        }

        return ret;
}

static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
        int enq_ret;

        *ret = 0;

        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: invalidate: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else {
                *ret = safexcel_rdesc_check_errors(priv, rdesc);
        }

        safexcel_complete(priv, ring);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;
                return 1;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        *should_complete = false;

        return 1;
}

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int err;

        BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);

        if (req->needs_inv) {
                req->needs_inv = false;
                err = safexcel_handle_inv_result(priv, ring, async,
                                                 should_complete, ret);
        } else {
                err = safexcel_handle_req_result(priv, ring, async,
                                                 should_complete, ret);
        }

        return err;
}

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
                                   int ring, int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        int ret;

        ret = safexcel_invalidate_cache(async, ctx->priv,
                                        ctx->base.ctxr_dma, ring);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}

static int safexcel_ahash_send(struct crypto_async_request *async,
                               int ring, int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int ret;

        if (req->needs_inv)
                ret = safexcel_ahash_send_inv(async, ring, commands, results);
        else
                ret = safexcel_ahash_send_req(async, ring, commands, results);

        return ret;
}

static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
        struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(req, 0, EIP197_AHASH_REQ_SIZE);

        /* create invalidation request */
        init_completion(&result.completion);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_inv_complete, &result);

        ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
        ctx = crypto_tfm_ctx(req->base.tfm);
        ctx->base.exit_inv = true;
        rctx->needs_inv = true;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        wait_for_completion(&result.completion);

        if (result.error) {
                dev_warn(priv->dev, "hash: completion error (%d)\n",
                         result.error);
                return result.error;
        }

        return 0;
}

/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, i.e. when there is at least one block's worth of data in the
 * pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        u64 cache_len;

        /* cache_len: everything accepted by the driver but not sent yet,
         * tot sz handled by update() - last req sz - tot sz handled by send()
         */
        cache_len = safexcel_queued_len(req);

        /*
         * In case there aren't enough bytes to proceed (less than a
         * block size), cache the data until we have enough.
         */
        if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache + cache_len,
                                   areq->nbytes, 0);
                return 0;
        }

        /* We couldn't cache all the data */
        return -E2BIG;
}

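/*
 * Queue the request on one of the engine rings. When the engine's
 * transform record cache (TRC) is in use, a context record whose
 * contents are about to change (digest, processed length or outer
 * state) must be invalidated first, as the engine may still hold a
 * stale cached copy of it.
 */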
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        req->needs_inv = false;

        if (ctx->base.ctxr) {
                if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
                    req->processed &&
                    (/* invalidate for basic hash continuation finish */
                     (req->finish &&
                      (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
                     /* invalidate if (i)digest changed */
                     memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
                     /* invalidate for HMAC continuation finish */
                     (req->finish && (req->processed != req->block_sz)) ||
                     /* invalidate for HMAC finish with odigest changed */
                     (req->finish &&
                      memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
                             ctx->opad, req->state_sz))))
                        /*
                         * We're still setting needs_inv here, even though it is
                         * cleared right away, because the needs_inv flag can be
                         * set in other functions and we want to keep the same
                         * logic.
                         */
                        ctx->base.needs_inv = true;

                if (ctx->base.needs_inv) {
                        ctx->base.needs_inv = false;
                        req->needs_inv = true;
                }
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(areq->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int ret;

        /* If the request is 0 length, do nothing */
        if (!areq->nbytes)
                return 0;

        /* Add request to the cache if it fits */
        ret = safexcel_ahash_cache(areq);

        /* Update total request length */
        req->len += areq->nbytes;

        /* If not all data could fit into the cache, go process the excess.
         * Also go process immediately for an HMAC IV precompute, which
         * will never be finished at all, but needs to be processed anyway.
         */
        if ((ret && !req->finish) || req->last_req)
                return safexcel_ahash_enqueue(areq);

        return 0;
}

static int safexcel_ahash_final(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        req->finish = true;

        if (unlikely(!req->len && !areq->nbytes)) {
                /*
                 * If we have an overall 0 length *hash* request:
                 * The HW cannot do 0 length hash, so we provide the correct
                 * result directly here.
                 */
                if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
                        memcpy(areq->result, md5_zero_message_hash,
                               MD5_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                        memcpy(areq->result, sha1_zero_message_hash,
                               SHA1_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
                        memcpy(areq->result, sha224_zero_message_hash,
                               SHA224_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                        memcpy(areq->result, sha256_zero_message_hash,
                               SHA256_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
                        memcpy(areq->result, sha384_zero_message_hash,
                               SHA384_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
                        memcpy(areq->result, sha512_zero_message_hash,
                               SHA512_DIGEST_SIZE);

                return 0;
        } else if (unlikely(req->hmac &&
                            (req->len == req->block_sz) &&
                            !areq->nbytes)) {
                /*
                 * If we have an overall 0 length *HMAC* request:
                 * For HMAC, we need to finalize the inner digest
                 * and then perform the outer hash.
                 */

                /* generate pad block in the cache */
                /* start with a hash block of all zeroes */
                memset(req->cache, 0, req->block_sz);
                /* set the first byte to 0x80 to 'append a 1 bit' */
                req->cache[0] = 0x80;
                /* add the length in bits to the length field (2 bytes suffice) */
                if (req->len_is_le) {
                        /* Little endian length word (e.g. MD5) */
                        req->cache[req->block_sz-8] = (req->block_sz << 3) &
                                                      255;
                        req->cache[req->block_sz-7] = (req->block_sz >> 5);
                } else {
                        /* Big endian length word (e.g. any SHA) */
                        req->cache[req->block_sz-2] = (req->block_sz >> 5);
                        req->cache[req->block_sz-1] = (req->block_sz << 3) &
                                                      255;
                }

                req->len += req->block_sz; /* plus 1 hash block */

                /* Set special zero-length HMAC flag */
                req->hmac_zlen = true;

                /* Finalize HMAC */
                req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
        } else if (req->hmac) {
                /* Finalize HMAC */
                req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
        }

        return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        req->finish = true;

        safexcel_ahash_update(areq);
        return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_export_state *export = out;

        export->len = req->len;
        export->processed = req->processed;

        export->digest = req->digest;

        memcpy(export->state, req->state, req->state_sz);
        memcpy(export->cache, req->cache, HASH_CACHE_SIZE);

        return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        const struct safexcel_ahash_export_state *export = in;
        int ret;

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req->len = export->len;
        req->processed = export->processed;

        req->digest = export->digest;

        memcpy(req->cache, export->cache, HASH_CACHE_SIZE);
        memcpy(req->state, export->state, req->state_sz);

        return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(__crypto_ahash_alg(tfm->__crt_alg),
                             struct safexcel_alg_template, alg.ahash);

        ctx->priv = tmpl->priv;
        ctx->base.send = safexcel_ahash_send;
        ctx->base.handle_result = safexcel_handle_result;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct safexcel_ahash_req));
        return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
        req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA1_DIGEST_SIZE;
        req->block_sz = SHA1_BLOCK_SIZE;

        return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        if (priv->flags & EIP197_TRC_CACHE) {
                ret = safexcel_ahash_exit_inv(tfm);
                if (ret)
                        dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
        } else {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);
        }
}

struct safexcel_alg_template safexcel_alg_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .algo_mask = SAFEXCEL_ALG_SHA1,
        .alg.ahash = {
                .init = safexcel_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha1_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "safexcel-sha1",
                                .cra_priority = SAFEXCEL_CRA_PRIORITY,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};
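
/*
 * Note: templates such as safexcel_alg_sha1 above are not registered
 * here; the driver core (safexcel.c) is expected to walk its template
 * list at probe time and register each entry with the crypto API
 * (via crypto_register_ahash() for the AHASH type).
 */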

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        /* Start from ipad precompute */
        memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE);
        /* Already processed the key^ipad part now! */
        req->len        = SHA1_BLOCK_SIZE;
        req->processed  = SHA1_BLOCK_SIZE;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
        req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA1_DIGEST_SIZE;
        req->block_sz = SHA1_BLOCK_SIZE;
        req->hmac = true;

        return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_hmac_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
        struct completion completion;
        int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

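/*
 * HMAC precomputation helpers. Following RFC 2104:
 *
 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * safexcel_hmac_init_pad() derives the two padded key blocks (hashing
 * the key first if it is longer than a block), and
 * safexcel_hmac_init_iv() pushes one such block through a single hash
 * block to obtain the precomputed inner/outer state the engine can
 * continue from.
 */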
static int safexcel_hmac_init_pad(struct ahash_request *areq,
                                  unsigned int blocksize, const u8 *key,
                                  unsigned int keylen, u8 *ipad, u8 *opad)
{
        struct safexcel_ahash_result result;
        struct scatterlist sg;
        int ret, i;
        u8 *keydup;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                keydup = kmemdup(key, keylen, GFP_KERNEL);
                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           safexcel_ahash_complete, &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(areq, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(areq);
                if (ret == -EINPROGRESS || ret == -EBUSY) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Avoid leaking */
                memzero_explicit(keydup, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}

static int safexcel_hmac_init_iv(struct ahash_request *areq,
                                 unsigned int blocksize, u8 *pad, void *state)
{
        struct safexcel_ahash_result result;
        struct safexcel_ahash_req *req;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(areq, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req = ahash_request_ctx(areq);
        req->hmac = true;
        req->last_req = true;

        ret = crypto_ahash_update(areq);
        if (ret && ret != -EINPROGRESS && ret != -EBUSY)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        return crypto_ahash_export(areq, state);
}

int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
                         void *istate, void *ostate)
{
        struct ahash_request *areq;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad, *opad;
        int ret;

        tfm = crypto_alloc_ahash(alg, 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        areq = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!areq) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);
        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kcalloc(2, blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_request;
        }

        opad = ipad + blocksize;

        ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
        kfree(ipad);
free_request:
        ahash_request_free(areq);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

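/*
 * Shared setkey helper for the hmac(*) variants: precompute the
 * inner/outer states for the new key and, when the engine caches
 * context records, flag the context for invalidation if the
 * precomputes actually changed.
 */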
1056 static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
1057                                     unsigned int keylen, const char *alg,
1058                                     unsigned int state_sz)
1059 {
1060         struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1061         struct safexcel_crypto_priv *priv = ctx->priv;
1062         struct safexcel_ahash_export_state istate, ostate;
1063         int ret;
1064
1065         ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
1066         if (ret)
1067                 return ret;
1068
1069         if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr &&
1070             (memcmp(ctx->ipad, istate.state, state_sz) ||
1071              memcmp(ctx->opad, ostate.state, state_sz)))
1072                 ctx->base.needs_inv = true;
1073
1074         memcpy(ctx->ipad, &istate.state, state_sz);
1075         memcpy(ctx->opad, &ostate.state, state_sz);
1076
1077         return 0;
1078 }
1079
1080 static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
1081                                      unsigned int keylen)
1082 {
1083         return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
1084                                         SHA1_DIGEST_SIZE);
1085 }
1086
1087 struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
1088         .type = SAFEXCEL_ALG_TYPE_AHASH,
1089         .algo_mask = SAFEXCEL_ALG_SHA1,
1090         .alg.ahash = {
1091                 .init = safexcel_hmac_sha1_init,
1092                 .update = safexcel_ahash_update,
1093                 .final = safexcel_ahash_final,
1094                 .finup = safexcel_ahash_finup,
1095                 .digest = safexcel_hmac_sha1_digest,
1096                 .setkey = safexcel_hmac_sha1_setkey,
1097                 .export = safexcel_ahash_export,
1098                 .import = safexcel_ahash_import,
1099                 .halg = {
1100                         .digestsize = SHA1_DIGEST_SIZE,
1101                         .statesize = sizeof(struct safexcel_ahash_export_state),
1102                         .base = {
1103                                 .cra_name = "hmac(sha1)",
1104                                 .cra_driver_name = "safexcel-hmac-sha1",
1105                                 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1106                                 .cra_flags = CRYPTO_ALG_ASYNC |
1107                                              CRYPTO_ALG_KERN_DRIVER_ONLY,
1108                                 .cra_blocksize = SHA1_BLOCK_SIZE,
1109                                 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1110                                 .cra_init = safexcel_ahash_cra_init,
1111                                 .cra_exit = safexcel_ahash_cra_exit,
1112                                 .cra_module = THIS_MODULE,
1113                         },
1114                 },
1115         },
1116 };
1117
1118 static int safexcel_sha256_init(struct ahash_request *areq)
1119 {
1120         struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1121         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1122
1123         memset(req, 0, sizeof(*req));
1124
1125         ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
1126         req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1127         req->state_sz = SHA256_DIGEST_SIZE;
1128         req->block_sz = SHA256_BLOCK_SIZE;
1129
1130         return 0;
1131 }
1132
1133 static int safexcel_sha256_digest(struct ahash_request *areq)
1134 {
1135         int ret = safexcel_sha256_init(areq);
1136
1137         if (ret)
1138                 return ret;
1139
1140         return safexcel_ahash_finup(areq);
1141 }
1142
1143 struct safexcel_alg_template safexcel_alg_sha256 = {
1144         .type = SAFEXCEL_ALG_TYPE_AHASH,
1145         .algo_mask = SAFEXCEL_ALG_SHA2_256,
1146         .alg.ahash = {
1147                 .init = safexcel_sha256_init,
1148                 .update = safexcel_ahash_update,
1149                 .final = safexcel_ahash_final,
1150                 .finup = safexcel_ahash_finup,
1151                 .digest = safexcel_sha256_digest,
1152                 .export = safexcel_ahash_export,
1153                 .import = safexcel_ahash_import,
1154                 .halg = {
1155                         .digestsize = SHA256_DIGEST_SIZE,
1156                         .statesize = sizeof(struct safexcel_ahash_export_state),
1157                         .base = {
1158                                 .cra_name = "sha256",
1159                                 .cra_driver_name = "safexcel-sha256",
1160                                 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1161                                 .cra_flags = CRYPTO_ALG_ASYNC |
1162                                              CRYPTO_ALG_KERN_DRIVER_ONLY,
1163                                 .cra_blocksize = SHA256_BLOCK_SIZE,
1164                                 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1165                                 .cra_init = safexcel_ahash_cra_init,
1166                                 .cra_exit = safexcel_ahash_cra_exit,
1167                                 .cra_module = THIS_MODULE,
1168                         },
1169                 },
1170         },
1171 };
1172
1173 static int safexcel_sha224_init(struct ahash_request *areq)
1174 {
1175         struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1176         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1177
1178         memset(req, 0, sizeof(*req));
1179
1180         ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
1181         req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1182         req->state_sz = SHA256_DIGEST_SIZE;
1183         req->block_sz = SHA256_BLOCK_SIZE;
1184
1185         return 0;
1186 }
1187
1188 static int safexcel_sha224_digest(struct ahash_request *areq)
1189 {
1190         int ret = safexcel_sha224_init(areq);
1191
1192         if (ret)
1193                 return ret;
1194
1195         return safexcel_ahash_finup(areq);
1196 }
1197
1198 struct safexcel_alg_template safexcel_alg_sha224 = {
1199         .type = SAFEXCEL_ALG_TYPE_AHASH,
1200         .algo_mask = SAFEXCEL_ALG_SHA2_256,
1201         .alg.ahash = {
1202                 .init = safexcel_sha224_init,
1203                 .update = safexcel_ahash_update,
1204                 .final = safexcel_ahash_final,
1205                 .finup = safexcel_ahash_finup,
1206                 .digest = safexcel_sha224_digest,
1207                 .export = safexcel_ahash_export,
1208                 .import = safexcel_ahash_import,
1209                 .halg = {
1210                         .digestsize = SHA224_DIGEST_SIZE,
1211                         .statesize = sizeof(struct safexcel_ahash_export_state),
1212                         .base = {
1213                                 .cra_name = "sha224",
1214                                 .cra_driver_name = "safexcel-sha224",
1215                                 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1216                                 .cra_flags = CRYPTO_ALG_ASYNC |
1217                                              CRYPTO_ALG_KERN_DRIVER_ONLY,
1218                                 .cra_blocksize = SHA224_BLOCK_SIZE,
1219                                 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1220                                 .cra_init = safexcel_ahash_cra_init,
1221                                 .cra_exit = safexcel_ahash_cra_exit,
1222                                 .cra_module = THIS_MODULE,
1223                         },
1224                 },
1225         },
1226 };
1227
1228 static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
1229                                        unsigned int keylen)
1230 {
1231         return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
1232                                         SHA256_DIGEST_SIZE);
1233 }
1234
1235 static int safexcel_hmac_sha224_init(struct ahash_request *areq)
1236 {
1237         struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1238         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1239
1240         memset(req, 0, sizeof(*req));
1241
1242         /* Start from ipad precompute */
1243         memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
1244         /* Already processed the key^ipad part now! */
1245         req->len        = SHA256_BLOCK_SIZE;
1246         req->processed  = SHA256_BLOCK_SIZE;
1247
1248         ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
1249         req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1250         req->state_sz = SHA256_DIGEST_SIZE;
1251         req->block_sz = SHA256_BLOCK_SIZE;
1252         req->hmac = true;
1253
1254         return 0;
1255 }
1256
1257 static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
1258 {
1259         int ret = safexcel_hmac_sha224_init(areq);
1260
1261         if (ret)
1262                 return ret;
1263
1264         return safexcel_ahash_finup(areq);
1265 }
1266
1267 struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
1268         .type = SAFEXCEL_ALG_TYPE_AHASH,
1269         .algo_mask = SAFEXCEL_ALG_SHA2_256,
1270         .alg.ahash = {
1271                 .init = safexcel_hmac_sha224_init,
1272                 .update = safexcel_ahash_update,
1273                 .final = safexcel_ahash_final,
1274                 .finup = safexcel_ahash_finup,
1275                 .digest = safexcel_hmac_sha224_digest,
1276                 .setkey = safexcel_hmac_sha224_setkey,
1277                 .export = safexcel_ahash_export,
1278                 .import = safexcel_ahash_import,
1279                 .halg = {
1280                         .digestsize = SHA224_DIGEST_SIZE,
1281                         .statesize = sizeof(struct safexcel_ahash_export_state),
1282                         .base = {
1283                                 .cra_name = "hmac(sha224)",
1284                                 .cra_driver_name = "safexcel-hmac-sha224",
1285                                 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1286                                 .cra_flags = CRYPTO_ALG_ASYNC |
1287                                              CRYPTO_ALG_KERN_DRIVER_ONLY,
1288                                 .cra_blocksize = SHA224_BLOCK_SIZE,
1289                                 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1290                                 .cra_init = safexcel_ahash_cra_init,
1291                                 .cra_exit = safexcel_ahash_cra_exit,
1292                                 .cra_module = THIS_MODULE,
1293                         },
1294                 },
1295         },
1296 };
1297
1298 static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
1299                                      unsigned int keylen)
1300 {
1301         return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
1302                                         SHA256_DIGEST_SIZE);
1303 }
1304
1305 static int safexcel_hmac_sha256_init(struct ahash_request *areq)
1306 {
1307         struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1308         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1309
1310         memset(req, 0, sizeof(*req));
1311
1312         /* Start from ipad precompute */
1313         memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
1314         /* Already processed the key^ipad part now! */
1315         req->len        = SHA256_BLOCK_SIZE;
1316         req->processed  = SHA256_BLOCK_SIZE;
1317
1318         ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
1319         req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1320         req->state_sz = SHA256_DIGEST_SIZE;
1321         req->block_sz = SHA256_BLOCK_SIZE;
1322         req->hmac = true;
1323
1324         return 0;
1325 }
1326
1327 static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
1328 {
1329         int ret = safexcel_hmac_sha256_init(areq);
1330
1331         if (ret)
1332                 return ret;
1333
1334         return safexcel_ahash_finup(areq);
1335 }
1336
1337 struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
1338         .type = SAFEXCEL_ALG_TYPE_AHASH,
1339         .algo_mask = SAFEXCEL_ALG_SHA2_256,
1340         .alg.ahash = {
1341                 .init = safexcel_hmac_sha256_init,
1342                 .update = safexcel_ahash_update,
1343                 .final = safexcel_ahash_final,
1344                 .finup = safexcel_ahash_finup,
1345                 .digest = safexcel_hmac_sha256_digest,
1346                 .setkey = safexcel_hmac_sha256_setkey,
1347                 .export = safexcel_ahash_export,
1348                 .import = safexcel_ahash_import,
1349                 .halg = {
1350                         .digestsize = SHA256_DIGEST_SIZE,
1351                         .statesize = sizeof(struct safexcel_ahash_export_state),
1352                         .base = {
1353                                 .cra_name = "hmac(sha256)",
1354                                 .cra_driver_name = "safexcel-hmac-sha256",
1355                                 .cra_priority = SAFEXCEL_CRA_PRIORITY,
1356                                 .cra_flags = CRYPTO_ALG_ASYNC |
1357                                              CRYPTO_ALG_KERN_DRIVER_ONLY,
1358                                 .cra_blocksize = SHA256_BLOCK_SIZE,
1359                                 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1360                                 .cra_init = safexcel_ahash_cra_init,
1361                                 .cra_exit = safexcel_ahash_cra_exit,
1362                                 .cra_module = THIS_MODULE,
1363                         },
1364                 },
1365         },
1366 };
1367
static int safexcel_sha512_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha512_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha512_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_sha512_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha512_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha512",
				.cra_driver_name = "safexcel-sha512",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

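/*
 * SHA-384 is SHA-512 with a different IV and a truncated result, so
 * state_sz and block_sz below keep the SHA-512 values; only the
 * exposed digestsize differs.
 */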
static int safexcel_sha384_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;

	return 0;
}

static int safexcel_sha384_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha384_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_sha384_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha384_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha384",
				.cra_driver_name = "safexcel-sha384",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

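/*
 * The HMAC setkey helpers delegate to safexcel_hmac_alg_setkey(),
 * which digests key^ipad and key^opad through the named base
 * transform and caches the intermediate states in the tfm context.
 */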
static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
					SHA512_DIGEST_SIZE);
}

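/*
 * Resume from the cached key^ipad digest: one block is accounted as
 * already processed, so the engine continues mid-hash instead of
 * restarting from the standard IV.
 */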
static int safexcel_hmac_sha512_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len	= SHA512_BLOCK_SIZE;
	req->processed	= SHA512_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha512_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_hmac_sha512_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha512_digest,
		.setkey = safexcel_hmac_sha512_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA512_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "safexcel-hmac-sha512",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

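/*
 * Deliberately SHA512_DIGEST_SIZE, not SHA384_DIGEST_SIZE: the ipad/
 * opad precomputes are full SHA-512-width internal states, matching
 * the state_sz used by the SHA-384 transforms above and below.
 */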
static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
					SHA512_DIGEST_SIZE);
}

static int safexcel_hmac_sha384_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len	= SHA512_BLOCK_SIZE;
	req->processed	= SHA512_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA512_DIGEST_SIZE;
	req->block_sz = SHA512_BLOCK_SIZE;
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha384_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_SHA2_512,
	.alg.ahash = {
		.init = safexcel_hmac_sha384_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha384_digest,
		.setkey = safexcel_hmac_sha384_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA384_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "safexcel-hmac-sha384",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

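/*
 * MD5: the header only provides MD5_HMAC_BLOCK_SIZE (64 bytes), which
 * equals the plain MD5 block size, so it is used for the basic hash
 * as well as for HMAC.
 */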
static int safexcel_md5_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = MD5_DIGEST_SIZE;
	req->block_sz = MD5_HMAC_BLOCK_SIZE;

	return 0;
}

static int safexcel_md5_digest(struct ahash_request *areq)
{
	int ret = safexcel_md5_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_md5 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_MD5,
	.alg.ahash = {
		.init = safexcel_md5_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_md5_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "safexcel-md5",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

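/*
 * Unlike the SHA family, MD5 appends its message bit count in
 * little-endian order; len_is_le below steers the driver's length
 * padding accordingly.
 */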
static int safexcel_hmac_md5_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	/* Start from ipad precompute */
	memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE);
	/* Already processed the key^ipad part now! */
	req->len	= MD5_HMAC_BLOCK_SIZE;
	req->processed	= MD5_HMAC_BLOCK_SIZE;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = MD5_DIGEST_SIZE;
	req->block_sz = MD5_HMAC_BLOCK_SIZE;
	req->len_is_le = true; /* MD5 is little endian! ... */
	req->hmac = true;

	return 0;
}

static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
					MD5_DIGEST_SIZE);
}

static int safexcel_hmac_md5_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_md5_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_md5 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.algo_mask = SAFEXCEL_ALG_MD5,
	.alg.ahash = {
		.init = safexcel_hmac_md5_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_md5_digest,
		.setkey = safexcel_hmac_md5_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "safexcel-hmac-md5",
				.cra_priority = SAFEXCEL_CRA_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};
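
/*
 * Usage sketch (illustrative only, not part of the driver): kernel
 * users reach these transforms through the generic ahash API by
 * cra_name, e.g. "hmac(sha256)" resolves to safexcel-hmac-sha256 when
 * this driver wins on cra_priority. The helper name and buffers below
 * are hypothetical; data must live in linearly-mapped memory so it can
 * be wrapped in a scatterlist.
 *
 *	#include <crypto/hash.h>
 *	#include <linux/scatterlist.h>
 *
 *	static int example_hmac_sha256(const u8 *key, unsigned int keylen,
 *				       const u8 *data, unsigned int len,
 *				       u8 *out)
 *	{
 *		struct crypto_ahash *tfm;
 *		struct ahash_request *req;
 *		struct scatterlist sg;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int ret;
 *
 *		tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		ret = crypto_ahash_setkey(tfm, key, keylen);
 *		if (ret)
 *			goto out_free_tfm;
 *
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			ret = -ENOMEM;
 *			goto out_free_tfm;
 *		}
 *
 *		sg_init_one(&sg, data, len);
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					   crypto_req_done, &wait);
 *		ahash_request_set_crypt(req, &sg, out, len);
 *
 *		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *		ahash_request_free(req);
 *	out_free_tfm:
 *		crypto_free_ahash(tfm);
 *		return ret;
 *	}
 */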