// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <crypto/internal/hash.h>

#include "common.h"
#include "core.h"
#include "sha.h"

/* crypto hw padding constant for first operation */
#define SHA_PADDING		64
#define SHA_PADDING_MASK	(SHA_PADDING - 1)

static LIST_HEAD(ahash_algs);

static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};

static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};

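/*
 * DMA completion callback: unmap the source and result scatterlists, copy
 * the intermediate digest and byte count out of the hardware result dump,
 * and restore the request fields saved before the transfer was set up.
 */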
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}

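/*
 * Map the source and result scatterlists, program the DMA channel and kick
 * off the hash operation; on failure, unwind the mappings in reverse order.
 */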
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG entries\n");
		return rctx->src_nents;
	}

	/* dma_map_sg() returns 0 on failure, not a negative errno */
	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (!ret)
		return -EIO;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (!ret) {
		ret = -EIO;
		goto error_unmap_src;
	}

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}

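/* Reset the request context and seed the digest with the standard IV */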
static int qce_ahash_init(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	const u32 *std_iv = tmpl->std_iv;

	memset(rctx, 0, sizeof(*rctx));
	rctx->first_blk = true;
	rctx->last_blk = false;
	rctx->flags = tmpl->alg_flags;
	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));

	return 0;
}

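/*
 * Export the partial state in the generic sha1_state/sha256_state layout
 * so it can be imported later, converting the digest words to big endian.
 */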
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buffer, rctx->buf, blocksize);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buf, rctx->buf, blocksize);
	} else {
		return -EINVAL;
	}

	return 0;
}

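/*
 * Common import helper: restore the buffered partial block, byte counters
 * and intermediate digest from a previously exported state.
 */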
static int qce_import_common(struct ahash_request *req, u64 in_count,
			     const u32 *state, const u8 *buffer, bool hmac)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	u64 count = in_count;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	rctx->count = in_count;
	memcpy(rctx->buf, buffer, blocksize);

	if (in_count <= blocksize) {
		rctx->first_blk = 1;
	} else {
		rctx->first_blk = 0;
		/*
		 * For HMAC, the hardware pads the message when the first
		 * block flag is set, so the byte count must be incremented
		 * by 64 after the first block operation.
		 */
		if (hmac)
			count += SHA_PADDING;
	}

	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
	rctx->byte_count[1] = (__force __be32)(count >> 32);
	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
			       digestsize);
	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));

	return 0;
}

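/* Dispatch to the common import helper based on the algorithm flags */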
static int qce_ahash_import(struct ahash_request *req, const void *in)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	bool hmac = IS_SHA_HMAC(flags);
	int ret = -EINVAL;

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		const struct sha1_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buffer, hmac);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		const struct sha256_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buf, hmac);
	}

	return ret;
}

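/*
 * Only whole blocks are handed to the hardware: any sub-block remainder is
 * kept in rctx->buf and prepended, via a chained scatterlist, to the data
 * of the next update or final operation.
 */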
static int qce_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg_last, *sg;
	unsigned int total, len;
	unsigned int hash_later;
	unsigned int nbytes;
	unsigned int blocksize;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->count += req->nbytes;

	/* check for buffer from previous updates and append it */
	total = req->nbytes + rctx->buflen;

	if (total <= blocksize) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
					 0, req->nbytes, 0);
		rctx->buflen += req->nbytes;
		return 0;
	}

	/* save the original req structure fields */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	/*
	 * If there is data left over from a previous update, copy it into the
	 * temporary buffer so it is combined with the current request bytes.
	 */
	if (rctx->buflen)
		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);

	/* calculate how many bytes will be hashed later */
	hash_later = total % blocksize;
	if (hash_later) {
		unsigned int src_offset = req->nbytes - hash_later;

		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
					 hash_later, 0);
	}

	/* here nbytes is a multiple of blocksize */
	nbytes = total - hash_later;

	len = rctx->buflen;
	sg = sg_last = req->src;

	while (len < nbytes && sg) {
		if (len + sg_dma_len(sg) > nbytes)
			break;
		len += sg_dma_len(sg);
		sg_last = sg;
		sg = sg_next(sg);
	}

	if (!sg_last)
		return -EINVAL;

	sg_mark_end(sg_last);

	if (rctx->buflen) {
		sg_init_table(rctx->sg, 2);
		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
		sg_chain(rctx->sg, 2, req->src);
		req->src = rctx->sg;
	}

	req->nbytes = nbytes;
	rctx->buflen = hash_later;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

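/* Hash whatever is still buffered as the last block */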
static int qce_ahash_final(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;

	if (!rctx->buflen)
		return 0;

	rctx->last_blk = true;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);

	req->src = rctx->sg;
	req->nbytes = rctx->buflen;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

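/* One-shot digest: init the context and hash the whole request at once */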
static int qce_ahash_digest(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	int ret;

	ret = qce_ahash_init(req);
	if (ret)
		return ret;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;
	rctx->first_blk = true;
	rctx->last_blk = true;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

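/*
 * As in generic HMAC, keys longer than the block size are first hashed
 * down to the digest size (here with the driver's own sha1-qce/sha256-qce
 * transforms); shorter keys are used as-is, zero padded.
 */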
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct crypto_wait wait;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}

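/* Set the per-request context size and zero the transform context */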
static int qce_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
	memset(ctx, 0, sizeof(*ctx));
	return 0;
}

struct qce_ahash_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int digestsize;
	unsigned int blocksize;
	unsigned int statesize;
	const u32 *std_iv;
};

static const struct qce_ahash_def ahash_def[] = {
	{
		.flags = QCE_HASH_SHA1,
		.name = "sha1",
		.drv_name = "sha1-qce",
		.digestsize = SHA1_DIGEST_SIZE,
		.blocksize = SHA1_BLOCK_SIZE,
		.statesize = sizeof(struct sha1_state),
		.std_iv = std_iv_sha1,
	},
	{
		.flags = QCE_HASH_SHA256,
		.name = "sha256",
		.drv_name = "sha256-qce",
		.digestsize = SHA256_DIGEST_SIZE,
		.blocksize = SHA256_BLOCK_SIZE,
		.statesize = sizeof(struct sha256_state),
		.std_iv = std_iv_sha256,
	},
	{
		.flags = QCE_HASH_SHA1_HMAC,
		.name = "hmac(sha1)",
		.drv_name = "hmac-sha1-qce",
		.digestsize = SHA1_DIGEST_SIZE,
		.blocksize = SHA1_BLOCK_SIZE,
		.statesize = sizeof(struct sha1_state),
		.std_iv = std_iv_sha1,
	},
	{
		.flags = QCE_HASH_SHA256_HMAC,
		.name = "hmac(sha256)",
		.drv_name = "hmac-sha256-qce",
		.digestsize = SHA256_DIGEST_SIZE,
		.blocksize = SHA256_BLOCK_SIZE,
		.statesize = sizeof(struct sha256_state),
		.std_iv = std_iv_sha256,
	},
};

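/*
 * Allocate a template for one algorithm definition, fill in the ahash ops
 * and crypto_alg fields, and register it with the crypto API.
 */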
static int qce_ahash_register_one(const struct qce_ahash_def *def,
				  struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct ahash_alg *alg;
	struct crypto_alg *base;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	tmpl->std_iv = def->std_iv;

	alg = &tmpl->alg.ahash;
	alg->init = qce_ahash_init;
	alg->update = qce_ahash_update;
	alg->final = qce_ahash_final;
	alg->digest = qce_ahash_digest;
	alg->export = qce_ahash_export;
	alg->import = qce_ahash_import;
	if (IS_SHA_HMAC(def->flags))
		alg->setkey = qce_ahash_hmac_setkey;
	alg->halg.digestsize = def->digestsize;
	alg->halg.statesize = def->statesize;

	base = &alg->halg.base;
	base->cra_blocksize = def->blocksize;
	base->cra_priority = 300;
	base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
	base->cra_alignmask = 0;
	base->cra_module = THIS_MODULE;
	base->cra_init = qce_ahash_cra_init;

	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_ahash(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ahash_algs);
	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
	return 0;
}

static void qce_ahash_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
		crypto_unregister_ahash(&tmpl->alg.ahash);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_ahash_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
		ret = qce_ahash_register_one(&ahash_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ahash_unregister(qce);
	return ret;
}

const struct qce_algo_ops ahash_ops = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.register_algs = qce_ahash_register,
	.unregister_algs = qce_ahash_unregister,
	.async_req_handle = qce_ahash_async_req_handle,
};