crypto: drivers - Use kmemdup rather than duplicating its implementation
[linux-2.6-block.git] / drivers / crypto / virtio / virtio_crypto_algs.c
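
The change in this revision replaces an open-coded kmalloc()+memcpy() pair used to duplicate the cipher key in virtio_crypto_alg_ablkcipher_init_session() with a single kmemdup() call. A minimal sketch of the pattern; the exact pre-change lines are an assumption based on the commit subject:

	/* before: open-coded duplication (assumed shape) */
	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;
	memcpy(cipher_key, key, keylen);

	/* after: kmemdup() allocates and copies in one step */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;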
// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

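/*
 * Per-transform (tfm) context: the backing virtio device plus the
 * encryption and decryption sessions created on it for the current key.
 */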
struct virtio_crypto_ablkcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};

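/*
 * One entry per supported algorithm: the virtio service/algonum pair,
 * a count of devices currently backing it, and the crypto_alg template.
 */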
struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct crypto_alg algo;
};

/*
 * The algs_lock protects the per-algorithm active_devs counters
 * below and crypto algorithm registration/unregistration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err);

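/*
 * Completion callback for requests on a data virtqueue: map the
 * device status to an errno and finalize the ablkcipher request.
 */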
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct ablkcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->ablkcipher_req;
		virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
						      ablk_req, error);
	}
}

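/* Total byte length of a scatterlist (sum over all entries) */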
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}

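/* Only AES-CBC is supported; the key length selects 128/192/256-bit AES */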
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n",
		       key_len);
		return -EINVAL;
	}
	return 0;
}

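/*
 * Create one session over the control virtqueue: send the
 * CREATE_SESSION header plus the key, then busy-wait for the device
 * to return the status and session id.
 */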
static int virtio_crypto_alg_ablkcipher_init_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid DMA from the stack: use a dynamically allocated
	 * copy of the key instead.
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	spin_lock(&vcrypto->ctrl_lock);
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * The kick traps into the hypervisor, so the request
	 * should be handled immediately.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}

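/*
 * Destroy one session (encrypt or decrypt) via a DESTROY_SESSION
 * request on the control virtqueue.
 */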
static int virtio_crypto_alg_ablkcipher_close_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		    sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       vcrypto->ctrl_status.status,
		       destroy_session->session_id);

		return -EINVAL;
	}
	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}

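/* Create the encryption and decryption sessions for the given key */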
static int virtio_crypto_alg_ablkcipher_init_sessions(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		goto bad_key;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		goto bad_key;

	/* Create encryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/* Note: kernel crypto API setkey entry point */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const uint8_t *key,
					   unsigned int keylen)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				virtcrypto_get_dev_node(node,
					VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}

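/*
 * Build and submit one cipher request on a data virtqueue: the out
 * direction carries the op header, the IV and the source data; the in
 * direction carries the destination data and the status byte.
 */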
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
		 src_nents, dst_nents);

	/* Why 3? outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
			   dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
		cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
		 req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
	    sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
		cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/*
	 * IV: avoid DMA from the stack, use a dynamically
	 * allocated buffer instead.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			  dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}

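/*
 * ablkcipher encrypt/decrypt entry points: record the per-request
 * state and hand the request off to the crypto engine, which later
 * calls virtio_crypto_ablkcipher_crypt_req() to do the submission.
 */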
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

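/* tfm init: set the request context size and the crypto engine ops */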
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}

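/* crypto engine .do_one_request callback: submit on the request's data queue */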
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

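/*
 * Complete the request towards the crypto engine and scrub the IV
 * copy and the request data (kzfree() zeroes before freeing).
 */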
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err)
{
	crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
					   req, err);
	kzfree(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);
}

static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "virtio_crypto_aes_cbc",
		.cra_priority = 150,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_init = virtio_crypto_ablkcipher_init,
		.cra_exit = virtio_crypto_ablkcipher_exit,
		.cra_u = {
			.ablkcipher = {
				.setkey = virtio_crypto_ablkcipher_setkey,
				.decrypt = virtio_crypto_ablkcipher_decrypt,
				.encrypt = virtio_crypto_ablkcipher_encrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			},
		},
	},
} };

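/*
 * Called when a device comes up: register each algorithm the device
 * supports with the crypto core on its first backing device.
 */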
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

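/*
 * Called when a device goes away: drop its reference on each supported
 * algorithm and unregister the algorithm once the last device is gone.
 */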
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_alg(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}