// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

struct virtio_crypto_ablkcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};

struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct crypto_alg algo;
};

/*
 * The algs_lock protects the per-algorithm active_devs counters in
 * virtio_crypto_algs[] below and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err);

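/*
 * Completion callback, run when the device has consumed a data-queue
 * request.  It translates the device's status byte into a -errno value
 * and finalizes the request on the crypto engine.
 */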
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct ablkcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->ablkcipher_req;
		virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
						      ablk_req, error);
	}
}

static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}

static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n",
		       key_len);
		return -EINVAL;
	}
	return 0;
}

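/*
 * Create a session on the device's control virtqueue.  The request is
 * posted as three scatterlist entries: the control header and the key
 * as driver-out buffers, plus a status/session-id buffer the device
 * writes back.  Control requests are expected to complete synchronously,
 * hence the busy-wait below instead of a completion callback.
 */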
static int virtio_crypto_alg_ablkcipher_init_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	memcpy(cipher_key, key, keylen);
	spin_lock(&vcrypto->ctrl_lock);
	/* Fill in the ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Fill in the cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * The kick traps into the hypervisor, so the request should be
	 * handled immediately; busy-wait for the device's answer.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}

static int virtio_crypto_alg_ablkcipher_close_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Fill in the ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		    sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       vcrypto->ctrl_status.status,
		       destroy_session->session_id);

		return -EINVAL;
	}

	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}

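/*
 * The virtio-crypto device models encryption and decryption as separate
 * sessions, so each setkey creates one session per direction; if the
 * decryption session fails, the already-created encryption session is
 * torn down again.
 */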
static int virtio_crypto_alg_ablkcipher_init_sessions(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		goto bad_key;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		goto bad_key;

	/* Create encryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/* Note: kernel crypto API implementation */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const uint8_t *key,
					   unsigned int keylen)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
			virtcrypto_get_dev_node(node,
					VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}

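/*
 * Build and post a single data-queue request.  The descriptor layout is:
 * driver-out: op header, IV, source sg entries;
 * device-in:  destination sg entries, one status byte.
 */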
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
		 src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
			   dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
		cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
		 req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
		cpu_to_le32((uint32_t)dst_len);
	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			  dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}

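/*
 * Crypto API entry points.  Requests are not submitted to the device
 * directly; they are queued on the crypto engine, which later calls
 * virtio_crypto_ablkcipher_crypt_req() from its worker thread.
 */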
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}

int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct ablkcipher_request *req = container_of(vreq,
				struct ablkcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err)
{
	crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
					   req, err);
	kzfree(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);
}

static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "virtio_crypto_aes_cbc",
		.cra_priority = 150,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_init = virtio_crypto_ablkcipher_init,
		.cra_exit = virtio_crypto_ablkcipher_exit,
		.cra_u = {
			.ablkcipher = {
				.setkey = virtio_crypto_ablkcipher_setkey,
				.decrypt = virtio_crypto_ablkcipher_decrypt,
				.encrypt = virtio_crypto_ablkcipher_encrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			},
		},
	},
} };

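/*
 * Usage sketch (illustrative only, not part of this driver): once
 * registered, the cipher is reachable through the regular kernel
 * crypto API, e.g.:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 * The crypto core chooses among registered "cbc(aes)" implementations
 * by cra_priority.
 */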
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_alg(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}