/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	u32 alg;

	u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
	u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
	bool last_req;
	bool finish;
	bool hmac;
	bool needs_inv;

	int nents;
	dma_addr_t result_dma;

	u32 digest;

	u8 state_sz;    /* expected state size, only set once */
	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

	u64 len;
	u64 processed;

	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
	dma_addr_t cache_dma;
	unsigned int cache_sz;

	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};

static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
				u32 input_length, u32 result_length)
{
	struct safexcel_token *token =
		(struct safexcel_token *)cdesc->control_data.token;

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = input_length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

	token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
	token[1].packet_length = result_length;
	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
			EIP197_TOKEN_STAT_LAST_PACKET;
	token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
				EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}
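
/*
 * Illustrative sketch, not compiled into the driver: for a SHA-1 request
 * hashing one 64-byte block, the token program built above amounts to
 * "hash 64 input bytes, then insert the 20-byte digest into the result".
 */
#if 0
	safexcel_hash_token(cdesc, 64, SHA1_DIGEST_SIZE);
	/*
	 * ...which leaves the token array as:
	 *   token[0]: DIRECTION, packet_length = 64, INS_TYPE_HASH
	 *   token[1]: INSERT,    packet_length = 20, INS_TYPE_OUTPUT |
	 *                                            INS_INSERT_HASH_DIGEST
	 */
#endif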

static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
				     struct safexcel_ahash_req *req,
				     struct safexcel_command_desc *cdesc,
				     unsigned int digestsize,
				     unsigned int blocksize)
{
	int i;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
	cdesc->control_data.control0 |= ctx->alg;
	cdesc->control_data.control0 |= req->digest;

	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
		if (req->processed) {
			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
		} else {
			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
		}

		if (!req->finish)
			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

		/*
		 * Copy the input digest if needed, and set up the context
		 * fields. Do this now as we need it to set up the first
		 * command descriptor.
		 */
		if (req->processed) {
			for (i = 0; i < digestsize / sizeof(u32); i++)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

			if (req->finish)
				ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
		}
	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));

		memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
		memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
		       ctx->opad, req->state_sz);
	}
}
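
/*
 * Worked example for the CONTEXT_CONTROL_SIZE() values above, derived from
 * the state layout rather than from vendor documentation: sizes are in
 * 32-bit context words. SHA-1 restores 5 state words plus 1 digest-count
 * word = 6; SHA-224/256 restore 8 state words plus the count word = 9. The
 * HMAC case stores the inner and outer precomputed states back to back,
 * hence 2 * req->state_sz / sizeof(u32) words.
 */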

static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
	int cache_len;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: result: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else {
		*ret = safexcel_rdesc_check_errors(priv, rdesc);
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (sreq->nents) {
		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
		sreq->nents = 0;
	}

	if (sreq->result_dma) {
		dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
				 DMA_FROM_DEVICE);
		sreq->result_dma = 0;
	}

	if (sreq->cache_dma) {
		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
				 DMA_TO_DEVICE);
		sreq->cache_dma = 0;
	}

	if (sreq->finish)
		memcpy(areq->result, sreq->state,
		       crypto_ahash_digestsize(ahash));

	cache_len = sreq->len - sreq->processed;
	if (cache_len)
		memcpy(sreq->cache, sreq->cache_next, cache_len);

	*should_complete = true;

	return 1;
}

static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
				   struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

	queued = len = req->len - req->processed;
	if (queued <= crypto_ahash_blocksize(ahash))
		cache_len = queued;
	else
		cache_len = queued - areq->nbytes;

	if (!req->last_req) {
		/* If this is not the last request and the queued data does not
		 * fit into full blocks, cache it for the next send() call.
		 */
		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
		if (!extra)
			/* If this is not the last request and the queued data
			 * is a multiple of a block, cache the last one for now.
			 */
			extra = crypto_ahash_blocksize(ahash);

		if (extra) {
			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req->cache_next, extra,
					   areq->nbytes - extra);

			queued -= extra;
			len -= extra;

			if (!queued) {
				*commands = 0;
				*results = 0;
				return 0;
			}
		}
	}

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
		req->cache_dma = dma_map_single(priv->dev, req->cache,
						cache_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, req->cache_dma)) {
			spin_unlock_bh(&priv->ring[ring].egress_lock);
			return -EINVAL;
		}

		req->cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
						 req->cache_dma, cache_len, len,
						 ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
			goto unmap_cache;
		}
		n_cdesc++;

		queued -= cache_len;
		if (!queued)
			goto send_command;
	}

	/* Now handle the current ahash request buffer(s) */
	req->nents = dma_map_sg(priv->dev, areq->src,
				sg_nents_for_len(areq->src, areq->nbytes),
				DMA_TO_DEVICE);
	if (!req->nents) {
		ret = -ENOMEM;
		goto cdesc_rollback;
	}

	for_each_sg(areq->src, sg, req->nents, i) {
		int sglen = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - sglen < 0)
			sglen = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
					   !(queued - sglen), sg_dma_address(sg),
					   sglen, len, ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			ret = PTR_ERR(cdesc);
			goto unmap_sg;
		}
		n_cdesc++;

		if (n_cdesc == 1)
			first_cdesc = cdesc;

		queued -= sglen;
		if (!queued)
			break;
	}

send_command:
	/* Set up the context options */
	safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
				 crypto_ahash_blocksize(ahash));

	/* Add the token */
	safexcel_hash_token(first_cdesc, len, req->state_sz);

	req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, req->result_dma)) {
		ret = -EINVAL;
		goto unmap_sg;
	}

	/* Add a result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
				   req->state_sz);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto unmap_result;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	req->processed += len;
	request->req = &areq->base;

	*commands = n_cdesc;
	*results = 1;
	return 0;

unmap_result:
	dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
			 DMA_FROM_DEVICE);
unmap_sg:
	dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
	if (req->cache_dma) {
		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
				 DMA_TO_DEVICE);
		req->cache_sz = 0;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}
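
/*
 * Worked example for the descriptor bookkeeping above (illustrative
 * numbers): with 16 cached bytes and the new data spread over two
 * scatterlist entries, send_req() emits one command descriptor for the
 * cache plus one per SG entry (n_cdesc = 3), and always exactly one result
 * descriptor, so it reports *commands = 3 and *results = 1.
 */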

static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	unsigned int state_w_sz = req->state_sz / sizeof(u32);
	int i;

	for (i = 0; i < state_w_sz; i++)
		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
			return true;

	if (ctx->base.ctxr->data[state_w_sz] !=
	    cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
		return true;

	return false;
}

static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct safexcel_result_desc *rdesc;
	struct ahash_request *areq = ahash_request_cast(async);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
	int enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
	if (IS_ERR(rdesc)) {
		dev_err(priv->dev,
			"hash: invalidate: could not retrieve the result descriptor\n");
		*ret = PTR_ERR(rdesc);
	} else if (rdesc->result_data.error_code) {
		dev_err(priv->dev,
			"hash: invalidate: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		*ret = -EINVAL;
	}

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;
		return 1;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	*should_complete = false;

	return 1;
}

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int err;

	BUG_ON(priv->version == EIP97 && req->needs_inv);

	if (req->needs_inv) {
		req->needs_inv = false;
		err = safexcel_handle_inv_result(priv, ring, async,
						 should_complete, ret);
	} else {
		err = safexcel_handle_req_result(priv, ring, async,
						 should_complete, ret);
	}

	return err;
}

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
				   int ring, struct safexcel_request *request,
				   int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	int ret;

	ret = safexcel_invalidate_cache(async, ctx->priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}

static int safexcel_ahash_send(struct crypto_async_request *async,
			       int ring, struct safexcel_request *request,
			       int *commands, int *results)
{
	struct ahash_request *areq = ahash_request_cast(async);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	int ret;

	if (req->needs_inv)
		ret = safexcel_ahash_send_inv(async, ring, request,
					      commands, results);
	else
		ret = safexcel_ahash_send_req(async, ring, request,
					      commands, results);
	return ret;
}

static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(req, 0, sizeof(struct ahash_request));

	/* create invalidation request */
	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_inv_complete, &result);

	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
	ctx = crypto_tfm_ctx(req->base.tfm);
	ctx->base.exit_inv = true;
	rctx->needs_inv = true;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	wait_for_completion(&result.completion);

	if (result.error) {
		dev_warn(priv->dev, "hash: completion error (%d)\n",
			 result.error);
		return result.error;
	}

	return 0;
}

/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, i.e. when there is at least one block size worth of data in
 * the pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	int queued, cache_len;

	/* cache_len: everything accepted by the driver but not sent yet,
	 * i.e. total size handled by update() - size of the last request -
	 * total size handled by send()
	 */
	cache_len = req->len - areq->nbytes - req->processed;
	/* queued: everything accepted by the driver which will be handled
	 * by the next send() calls, i.e. total size handled by update() -
	 * total size handled by send()
	 */
	queued = req->len - req->processed;

	/*
	 * If there aren't enough bytes to proceed (less than a block size),
	 * cache the data until we have enough.
	 */
	if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
				   req->cache + cache_len,
				   areq->nbytes, 0);
		return areq->nbytes;
	}

	/* We couldn't cache all the data */
	return -E2BIG;
}
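
/*
 * Worked example for the arithmetic above (illustrative numbers): suppose
 * send() has already consumed 64 bytes (req->processed = 64), 16 more
 * bytes sit in req->cache, and update() is now called with 20 new bytes.
 * update() bumps req->len to 100 before calling here, so cache_len =
 * 100 - 20 - 64 = 16 and queued = 100 - 64 = 36. Since 16 + 20 <= 64 (the
 * SHA-1/SHA-256 block size), the 20 new bytes are appended to the cache at
 * offset 16 and nothing is sent to the engine yet.
 */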

static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	req->needs_inv = false;

	if (ctx->base.ctxr) {
		if (priv->version == EIP197 &&
		    !ctx->base.needs_inv && req->processed &&
		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
			/* We're still setting needs_inv here, even though it is
			 * cleared right away, because the needs_inv flag can be
			 * set in other functions and we want to keep the same
			 * logic.
			 */
			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

		if (ctx->base.needs_inv) {
			ctx->base.needs_inv = false;
			req->needs_inv = true;
		}
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(areq->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	/* If the request is 0 length, do nothing */
	if (!areq->nbytes)
		return 0;

	req->len += areq->nbytes;

	safexcel_ahash_cache(areq);

	/*
	 * We're not doing partial updates when performing an HMAC request.
	 * Everything will be handled by the final() call.
	 */
	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
		return 0;

	if (req->hmac)
		return safexcel_ahash_enqueue(areq);

	if (!req->last_req &&
	    req->len - req->processed > crypto_ahash_blocksize(ahash))
		return safexcel_ahash_enqueue(areq);

	return 0;
}

static int safexcel_ahash_final(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	req->last_req = true;
	req->finish = true;

	/* If we have an overall 0 length request */
	if (!(req->len + areq->nbytes)) {
		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
			memcpy(areq->result, sha1_zero_message_hash,
			       SHA1_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
			memcpy(areq->result, sha224_zero_message_hash,
			       SHA224_DIGEST_SIZE);
		else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
			memcpy(areq->result, sha256_zero_message_hash,
			       SHA256_DIGEST_SIZE);

		return 0;
	}

	return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	req->last_req = true;
	req->finish = true;

	safexcel_ahash_update(areq);
	return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	struct safexcel_ahash_export_state *export = out;

	export->len = req->len;
	export->processed = req->processed;

	export->digest = req->digest;

	memcpy(export->state, req->state, req->state_sz);
	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

	return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
	const struct safexcel_ahash_export_state *export = in;
	int ret;

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req->len = export->len;
	req->processed = export->processed;

	req->digest = export->digest;

	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
	memcpy(req->state, export->state, req->state_sz);

	return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(__crypto_ahash_alg(tfm->__crt_alg),
			     struct safexcel_alg_template, alg.ahash);

	ctx->priv = tmpl->priv;
	ctx->base.send = safexcel_ahash_send;
	ctx->base.handle_result = safexcel_handle_result;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
	return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA1_H0;
	req->state[1] = SHA1_H1;
	req->state[2] = SHA1_H2;
	req->state[3] = SHA1_H3;
	req->state[4] = SHA1_H4;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA1_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	if (priv->version == EIP197) {
		ret = safexcel_ahash_exit_inv(tfm);
		if (ret)
			dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
	} else {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);
	}
}

struct safexcel_alg_template safexcel_alg_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha1_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "safexcel-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha1_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha1_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
	struct completion completion;
	int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int safexcel_hmac_init_pad(struct ahash_request *areq,
				  unsigned int blocksize, const u8 *key,
				  unsigned int keylen, u8 *ipad, u8 *opad)
{
	struct safexcel_ahash_result result;
	struct scatterlist sg;
	int ret, i;
	u8 *keydup;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   safexcel_ahash_complete, &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(areq, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(areq);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Avoid leaking */
		memzero_explicit(keydup, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
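
/*
 * Illustrative fragment, not compiled into the driver: for a key no longer
 * than the block size, the pad derivation above is the standard RFC 2104
 * construction, which for a 64-byte block reduces to the following
 * (HMAC_IPAD_VALUE is 0x36 and HMAC_OPAD_VALUE is 0x5c, per
 * <crypto/hmac.h>; key/keylen are as in the function above).
 */
#if 0
	u8 ipad[64], opad[64];
	int i;

	memcpy(ipad, key, keylen);		/* key, zero-padded... */
	memset(ipad + keylen, 0, 64 - keylen);	/* ...to the block size */
	memcpy(opad, ipad, 64);

	for (i = 0; i < 64; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;	/* inner pad */
		opad[i] ^= HMAC_OPAD_VALUE;	/* outer pad */
	}
	/* HMAC(key, msg) = H(opad || H(ipad || msg)) */
#endif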

static int safexcel_hmac_init_iv(struct ahash_request *areq,
				 unsigned int blocksize, u8 *pad, void *state)
{
	struct safexcel_ahash_result result;
	struct safexcel_ahash_req *req;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   safexcel_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(areq, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(areq);
	if (ret)
		return ret;

	req = ahash_request_ctx(areq);
	req->hmac = true;
	req->last_req = true;

	ret = crypto_ahash_update(areq);
	if (ret && ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	return crypto_ahash_export(areq, state);
}

int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
			 void *istate, void *ostate)
{
	struct ahash_request *areq;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad, *opad;
	int ret;

	tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	areq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!areq) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_request;
	}

	opad = ipad + blocksize;

	ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
	if (ret)
		goto free_ipad;

	ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
	kfree(ipad);
free_request:
	ahash_request_free(areq);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
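
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a setkey implementation computes both HMAC midstates once per key and
 * later programs them into the engine's context record, as
 * safexcel_hmac_alg_setkey() below does.
 */
#if 0
	struct safexcel_ahash_export_state istate, ostate;
	int ret;

	ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen,
				   &istate, &ostate);
	if (ret)
		return ret;
	/* istate.state / ostate.state now hold the inner/outer midstates */
#endif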

static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen, const char *alg,
				    unsigned int state_sz)
{
	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_ahash_export_state istate, ostate;
	int ret, i;

	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	if (priv->version == EIP197 && ctx->base.ctxr) {
		for (i = 0; i < state_sz / sizeof(u32); i++) {
			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
				ctx->base.needs_inv = true;
				break;
			}
		}
	}

	memcpy(ctx->ipad, &istate.state, state_sz);
	memcpy(ctx->opad, &ostate.state, state_sz);

	return 0;
}

static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
					SHA1_DIGEST_SIZE);
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha1_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha1_digest,
		.setkey = safexcel_hmac_sha1_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "safexcel-hmac-sha1",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA256_H0;
	req->state[1] = SHA256_H1;
	req->state[2] = SHA256_H2;
	req->state[3] = SHA256_H3;
	req->state[4] = SHA256_H4;
	req->state[5] = SHA256_H5;
	req->state[6] = SHA256_H6;
	req->state[7] = SHA256_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha256_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "safexcel-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	memset(req, 0, sizeof(*req));

	req->state[0] = SHA224_H0;
	req->state[1] = SHA224_H1;
	req->state[2] = SHA224_H2;
	req->state[3] = SHA224_H3;
	req->state[4] = SHA224_H4;
	req->state[5] = SHA224_H5;
	req->state[6] = SHA224_H6;
	req->state[7] = SHA224_H7;

	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
	req->state_sz = SHA256_DIGEST_SIZE;

	return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_sha224_digest,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "safexcel-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha224_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha224_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha224_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha224_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha224_digest,
		.setkey = safexcel_hmac_sha224_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "safexcel-hmac-sha224",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};

static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
					SHA256_DIGEST_SIZE);
}

static int safexcel_hmac_sha256_init(struct ahash_request *areq)
{
	struct safexcel_ahash_req *req = ahash_request_ctx(areq);

	safexcel_sha256_init(areq);
	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
	return 0;
}

static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
{
	int ret = safexcel_hmac_sha256_init(areq);

	if (ret)
		return ret;

	return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
	.type = SAFEXCEL_ALG_TYPE_AHASH,
	.alg.ahash = {
		.init = safexcel_hmac_sha256_init,
		.update = safexcel_ahash_update,
		.final = safexcel_ahash_final,
		.finup = safexcel_ahash_finup,
		.digest = safexcel_hmac_sha256_digest,
		.setkey = safexcel_hmac_sha256_setkey,
		.export = safexcel_ahash_export,
		.import = safexcel_ahash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct safexcel_ahash_export_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "safexcel-hmac-sha256",
				.cra_priority = 300,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
				.cra_init = safexcel_ahash_cra_init,
				.cra_exit = safexcel_ahash_cra_exit,
				.cra_module = THIS_MODULE,
			},
		},
	},
};