/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

/*
 * The algs_lock protects the below global virtio_crypto_active_devs
 * and crypto algorithms registration.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;

static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
        u64 total = 0;

        for (total = 0; sg; sg = sg_next(sg))
                total += sg->length;

        return total;
}

static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
        switch (key_len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
                break;
        default:
                pr_err("virtio_crypto: Unsupported key length: %d\n",
                        key_len);
                return -EINVAL;
        }
        return 0;
}

static int virtio_crypto_alg_ablkcipher_init_session(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                uint32_t alg, const uint8_t *key,
                unsigned int keylen,
                int encrypt)
{
        struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
        unsigned int tmp;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
        int err;
        unsigned int num_out = 0, num_in = 0;

        /*
         * Avoid DMA from the stack: use a dynamically allocated
         * buffer for the key.
         */
        uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

        if (!cipher_key)
                return -ENOMEM;

        memcpy(cipher_key, key, keylen);

        spin_lock(&vcrypto->ctrl_lock);
        /* Fill in the ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
        vcrypto->ctrl.header.algo = cpu_to_le32(alg);
        /* Set the default dataqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
        /* Fill in the cipher parameters */
        vcrypto->ctrl.u.sym_create_session.op_type =
                cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
                vcrypto->ctrl.header.algo;
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
                cpu_to_le32(keylen);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
                cpu_to_le32(op);

        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Set key */
        sg_init_one(&key_sg, cipher_key, keylen);
        sgs[num_out++] = &key_sg;

        /* Return status and session id back */
        sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
        sgs[num_out + num_in++] = &inhdr;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                        num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                kzfree(cipher_key);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        /*
         * The kick traps into the hypervisor, so the request should be
         * handled promptly; busy-wait for the device's answer.
         */
        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Create session failed status: %u\n",
                        le32_to_cpu(vcrypto->input.status));
                kzfree(cipher_key);
                return -EINVAL;
        }

        if (encrypt)
                ctx->enc_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);
        else
                ctx->dec_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);

        spin_unlock(&vcrypto->ctrl_lock);

        kzfree(cipher_key);
        return 0;
}

static int virtio_crypto_alg_ablkcipher_close_session(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                int encrypt)
{
        struct scatterlist outhdr, status_sg, *sgs[2];
        unsigned int tmp;
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int err;
        unsigned int num_out = 0, num_in = 0;

        spin_lock(&vcrypto->ctrl_lock);
        vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
        /* Fill in the ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
        /* Set the default virtqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        destroy_session = &vcrypto->ctrl.u.destroy_session;

        if (encrypt)
                destroy_session->session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
        else
                destroy_session->session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);

        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Return the status back */
        sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
                sizeof(vcrypto->ctrl_status.status));
        sgs[num_out + num_in++] = &status_sg;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                        num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
                        vcrypto->ctrl_status.status,
                        destroy_session->session_id);

                return -EINVAL;
        }
        spin_unlock(&vcrypto->ctrl_lock);

        return 0;
}

static int virtio_crypto_alg_ablkcipher_init_sessions(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                const uint8_t *key, unsigned int keylen)
{
        uint32_t alg;
        int ret;
        struct virtio_crypto *vcrypto = ctx->vcrypto;

        if (keylen > vcrypto->max_cipher_key_len) {
                pr_err("virtio_crypto: the key is too long\n");
                goto bad_key;
        }

        if (virtio_crypto_alg_validate_key(keylen, &alg))
                goto bad_key;

        /* Create encryption session */
        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
                        alg, key, keylen, 1);
        if (ret)
                return ret;
        /* Create decryption session */
        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
                        alg, key, keylen, 0);
        if (ret) {
                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
                return ret;
        }
        return 0;

bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

/* Note: kernel crypto API implementation */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                         const uint8_t *key,
                                         unsigned int keylen)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        int ret;

        if (!ctx->vcrypto) {
                /* New key */
                int node = virtio_crypto_get_current_node();
                struct virtio_crypto *vcrypto =
                                virtcrypto_get_dev_node(node);
                if (!vcrypto) {
                        pr_err("virtio_crypto: Could not find a virtio device in the system\n");
                        return -ENODEV;
                }

                ctx->vcrypto = vcrypto;
        } else {
                /* Rekeying: close the previously created sessions first */
                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
                virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
        }

        ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
        if (ret) {
                virtcrypto_dev_put(ctx->vcrypto);
                ctx->vcrypto = NULL;

                return ret;
        }

        return 0;
}

static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
                struct ablkcipher_request *req,
                struct data_queue *data_vq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct virtio_crypto_op_data_req *req_data;
        int src_nents, dst_nents;
        int err;
        unsigned long flags;
        struct scatterlist outhdr, iv_sg, status_sg, **sgs;
        int i;
        u64 dst_len;
        unsigned int num_out = 0, num_in = 0;
        int sg_total;
        uint8_t *iv;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        dst_nents = sg_nents(req->dst);

        pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
                        src_nents, dst_nents);

        /* Why 3? outhdr + iv + inhdr */
        sg_total = src_nents + dst_nents + 3;
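        /*
         * For reference, the sgs[] array assembled below is laid out as:
         *   device-readable:  outhdr (req_data), iv, then the source buffers;
         *   device-writable:  the destination buffers, then the one-byte status.
         */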
        sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!sgs)
                return -ENOMEM;

        req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!req_data) {
                kfree(sgs);
                return -ENOMEM;
        }

        vc_req->req_data = req_data;
        vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
        /* Head of operation */
        if (vc_req->encrypt) {
                req_data->header.session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
        } else {
                req_data->header.session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
        }
        req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
        req_data->u.sym_req.u.cipher.para.src_data_len =
                        cpu_to_le32(req->nbytes);

        dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
        if (unlikely(dst_len > U32_MAX)) {
                pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
                err = -EINVAL;
                goto free;
        }

        pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
                        req->nbytes, dst_len);

        if (unlikely(req->nbytes + dst_len + ivsize +
                sizeof(vc_req->status) > vcrypto->max_size)) {
                pr_err("virtio_crypto: The length is too big\n");
                err = -EINVAL;
                goto free;
        }

        req_data->u.sym_req.u.cipher.para.dst_data_len =
                        cpu_to_le32((uint32_t)dst_len);

        /* Outhdr */
        sg_init_one(&outhdr, req_data, sizeof(*req_data));
        sgs[num_out++] = &outhdr;

        /* IV */

        /*
         * Avoid DMA from the stack: use a dynamically allocated
         * buffer for the IV.
         */
        iv = kzalloc_node(ivsize, GFP_ATOMIC,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!iv) {
                err = -ENOMEM;
                goto free;
        }
        memcpy(iv, req->info, ivsize);
        sg_init_one(&iv_sg, iv, ivsize);
        sgs[num_out++] = &iv_sg;
        vc_req->iv = iv;

        /* Source data */
        for (i = 0; i < src_nents; i++)
                sgs[num_out++] = &req->src[i];

        /* Destination data */
        for (i = 0; i < dst_nents; i++)
                sgs[num_out + num_in++] = &req->dst[i];

        /* Status */
        sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
        sgs[num_out + num_in++] = &status_sg;

        vc_req->sgs = sgs;

        spin_lock_irqsave(&data_vq->lock, flags);
        err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
                                num_in, vc_req, GFP_ATOMIC);
        virtqueue_kick(data_vq->vq);
        spin_unlock_irqrestore(&data_vq->lock, flags);
        if (unlikely(err < 0))
                goto free_iv;

        return 0;

free_iv:
        kzfree(iv);
free:
        kzfree(req_data);
        kfree(sgs);
        return err;
}

static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
        struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        vc_req->ablkcipher_ctx = ctx;
        vc_req->ablkcipher_req = req;
        vc_req->encrypt = true;
        vc_req->dataq = data_vq;

        return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
        struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        vc_req->ablkcipher_ctx = ctx;
        vc_req->ablkcipher_req = req;

        vc_req->encrypt = false;
        vc_req->dataq = data_vq;

        return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
        ctx->tfm = tfm;

        return 0;
}

static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        if (!ctx->vcrypto)
                return;

        virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
        virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
        virtcrypto_dev_put(ctx->vcrypto);
        ctx->vcrypto = NULL;
}

int virtio_crypto_ablkcipher_crypt_req(
        struct crypto_engine *engine,
        struct ablkcipher_request *req)
{
        struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
        struct data_queue *data_vq = vc_req->dataq;
        int ret;

        ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq);
        if (ret < 0)
                return ret;

        virtqueue_kick(data_vq->vq);

        return 0;
}

void virtio_crypto_ablkcipher_finalize_req(
        struct virtio_crypto_request *vc_req,
        struct ablkcipher_request *req,
        int err)
{
        crypto_finalize_cipher_request(vc_req->dataq->engine, req, err);

        virtcrypto_clear_request(vc_req);
}
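
/*
 * Request flow, for reference: the encrypt()/decrypt() entry points above
 * only fill in the per-request context and hand the request to the crypto
 * engine; the engine later invokes virtio_crypto_ablkcipher_crypt_req() to
 * build and submit the virtqueue descriptors, and the data-queue completion
 * path calls virtio_crypto_ablkcipher_finalize_req() to report the result
 * back to the crypto API and release per-request resources.
 */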

static struct crypto_alg virtio_crypto_algs[] = { {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "virtio_crypto_aes_cbc",
        .cra_priority = 150,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
        .cra_type = &crypto_ablkcipher_type,
        .cra_init = virtio_crypto_ablkcipher_init,
        .cra_exit = virtio_crypto_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = virtio_crypto_ablkcipher_setkey,
                        .decrypt = virtio_crypto_ablkcipher_decrypt,
                        .encrypt = virtio_crypto_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
} };
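
/*
 * Illustrative sketch (not part of this driver): once "cbc(aes)" is
 * registered, a kernel consumer would reach this implementation through the
 * generic ablkcipher API roughly as below.  Buffer and callback names
 * (src_buf, dst_buf, iv, my_done_cb) are hypothetical, and error handling
 * is omitted for brevity.
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *areq;
 *	struct scatterlist sg_src, sg_dst;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	areq = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg_src, src_buf, len);
 *	sg_init_one(&sg_dst, dst_buf, len);
 *	ablkcipher_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_done_cb, NULL);
 *	ablkcipher_request_set_crypt(areq, &sg_src, &sg_dst, len, iv);
 *	crypto_ablkcipher_encrypt(areq);
 *	// typically returns -EINPROGRESS for this async driver;
 *	// my_done_cb() runs on completion.
 */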

int virtio_crypto_algs_register(void)
{
        int ret = 0;

        mutex_lock(&algs_lock);
        if (++virtio_crypto_active_devs != 1)
                goto unlock;

        ret = crypto_register_algs(virtio_crypto_algs,
                        ARRAY_SIZE(virtio_crypto_algs));
        if (ret)
                virtio_crypto_active_devs--;

unlock:
        mutex_unlock(&algs_lock);
        return ret;
}

void virtio_crypto_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--virtio_crypto_active_devs != 0)
                goto unlock;

        crypto_unregister_algs(virtio_crypto_algs,
                        ARRAY_SIZE(virtio_crypto_algs));

unlock:
        mutex_unlock(&algs_lock);
}