// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
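
/*
 * Release a request's private resources. The request payload may hold
 * key material, so it is wiped with kfree_sensitive() before the
 * memory is returned.
 */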
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
        if (vc_req) {
                kfree_sensitive(vc_req->req_data);
                kfree(vc_req->sgs);
        }
}

static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
        complete(&vc_ctrl_req->compl);
}
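
/*
 * Control-virtqueue interrupt handler: drain all completed control
 * requests and wake each waiter. The ctrl_lock is dropped while a
 * per-request completion runs, and callbacks are only left enabled
 * once the queue is empty, so no completion can be missed.
 */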
static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
        struct virtio_crypto *vcrypto = vq->vdev->priv;
        struct virtio_crypto_ctrl_request *vc_ctrl_req;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
                        spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
                        virtio_crypto_ctrlq_callback(vc_ctrl_req);
                        spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}
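
/*
 * Submit a control request: add the caller's scatterlists to the
 * control virtqueue, kick the device, then sleep until the interrupt
 * handler above signals vc_ctrl_req->compl. Callers must therefore be
 * in a context that may sleep.
 */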
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
                                  unsigned int out_sgs, unsigned int in_sgs,
                                  struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
        int err;
        unsigned long flags;

        init_completion(&vc_ctrl_req->compl);

        spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
                return err;
        }

        virtqueue_kick(vcrypto->ctrl_vq);
        spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);

        wait_for_completion(&vc_ctrl_req->compl);

        return 0;
}
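
/*
 * Usage sketch only (the buffers named here are illustrative, not
 * defined in this file): a caller handing the device one readable and
 * one writable buffer would do roughly
 *
 *      struct scatterlist out_sg, in_sg, *sgs[2];
 *
 *      sg_init_one(&out_sg, req, req_len);
 *      sg_init_one(&in_sg, status, status_len);
 *      sgs[0] = &out_sg;
 *      sgs[1] = &in_sg;
 *      err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, 1, 1, vc_ctrl_req);
 */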
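
/*
 * Data-virtqueue interrupt handler: pass each completed request to its
 * algorithm-specific callback (vc_req->alg_cb), dropping the queue
 * lock while the callback runs.
 */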
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
        struct virtio_crypto *vcrypto = vq->vdev->priv;
        struct virtio_crypto_request *vc_req;
        unsigned long flags;
        unsigned int len;
        unsigned int qid = vq->index;

        spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
                        spin_unlock_irqrestore(
                                &vcrypto->data_vq[qid].lock, flags);
                        if (vc_req->alg_cb)
                                vc_req->alg_cb(vc_req, len);
                        spin_lock_irqsave(
                                &vcrypto->data_vq[qid].lock, flags);
                }
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}
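
/*
 * Discover all virtqueues and attach one crypto engine to each data
 * queue, with the engine's queue length matching the vring size.
 */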
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
        vq_callback_t **callbacks;
        struct virtqueue **vqs;
        int ret = -ENOMEM;
        int i, total_vqs;
        const char **names;
        struct device *dev = &vi->vdev->dev;

        /*
         * We expect one data virtqueue, possibly followed by N-1 more
         * data queues used in multiqueue mode, and finally the control vq.
         */
        total_vqs = vi->max_data_queues + 1;

        /* Allocate space for find_vqs parameters */
        vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!vqs)
                goto err_vq;
        callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
        if (!callbacks)
                goto err_callback;
        names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
        if (!names)
                goto err_names;

        /* Parameters for control virtqueue */
        callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
        names[total_vqs - 1] = "controlq";

        /* Allocate/initialize parameters for data virtqueues */
        for (i = 0; i < vi->max_data_queues; i++) {
                callbacks[i] = virtcrypto_dataq_callback;
                snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
                         "dataq.%d", i);
                names[i] = vi->data_vq[i].name;
        }

        ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
        if (ret)
                goto err_find;

        vi->ctrl_vq = vqs[total_vqs - 1];

        for (i = 0; i < vi->max_data_queues; i++) {
                spin_lock_init(&vi->data_vq[i].lock);
                vi->data_vq[i].vq = vqs[i];
                /* Initialize crypto engine */
                vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
                                                virtqueue_get_vring_size(vqs[i]));
                if (!vi->data_vq[i].engine) {
                        ret = -ENOMEM;
                        goto err_engine;
                }
        }

        kfree(names);
        kfree(callbacks);
        kfree(vqs);

        return 0;

err_engine:
err_find:
        kfree(names);
err_names:
        kfree(callbacks);
err_callback:
        kfree(vqs);
err_vq:
        return ret;
}

static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
        vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
                              GFP_KERNEL);
        if (!vi->data_vq)
                return -ENOMEM;

        return 0;
}

static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
        int i;

        if (vi->affinity_hint_set) {
                for (i = 0; i < vi->max_data_queues; i++)
                        virtqueue_set_affinity(vi->data_vq[i].vq, NULL);

                vi->affinity_hint_set = false;
        }
}

static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
        int i = 0;
        int cpu;

        /*
         * In single queue mode, we don't set the cpu affinity.
         */
        if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
                virtcrypto_clean_affinity(vcrypto, -1);
                return;
        }

        /*
         * In multiqueue mode, we make each queue private to one cpu
         * by setting the affinity hint, which eliminates contention.
         *
         * TODO: add cpu hotplug support by registering a cpu notifier.
         */
        for_each_online_cpu(cpu) {
                virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
                if (++i >= vcrypto->max_data_queues)
                        break;
        }

        vcrypto->affinity_hint_set = true;
}

static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
        kfree(vi->data_vq);
}

static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
        int ret;

        /* Allocate send & receive queues */
        ret = virtcrypto_alloc_queues(vi);
        if (ret)
                goto err;

        ret = virtcrypto_find_vqs(vi);
        if (ret)
                goto err_free;

        cpus_read_lock();
        virtcrypto_set_affinity(vi);
        cpus_read_unlock();

        return 0;

err_free:
        virtcrypto_free_queues(vi);
err:
        return ret;
}
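
/*
 * Re-read the device's status field and start or stop the device
 * accordingly. Any status bit the driver does not understand marks the
 * device as broken.
 */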
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
        u32 status;
        int err;

        virtio_cread_le(vcrypto->vdev,
                        struct virtio_crypto_config, status, &status);

        /*
         * Unknown status bits would be a host error and the driver
         * should consider the device to be broken.
         */
        if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
                dev_warn(&vcrypto->vdev->dev,
                         "Unknown status bits: 0x%x\n", status);

                virtio_break_device(vcrypto->vdev);
                return -EPERM;
        }

        if (vcrypto->status == status)
                return 0;

        vcrypto->status = status;

        if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
                err = virtcrypto_dev_start(vcrypto);
                if (err) {
                        dev_err(&vcrypto->vdev->dev,
                                "Failed to start virtio crypto device.\n");

                        return -EPERM;
                }
                dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
        } else {
                virtcrypto_dev_stop(vcrypto);
                dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
        }

        return 0;
}
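
/*
 * Start one crypto engine per data queue; on failure, tear down the
 * engines that were already started.
 */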
static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
        int32_t i;
        int ret;

        for (i = 0; i < vcrypto->max_data_queues; i++) {
                if (vcrypto->data_vq[i].engine) {
                        ret = crypto_engine_start(vcrypto->data_vq[i].engine);
                        if (ret)
                                goto err;
                }
        }

        return 0;

err:
        while (--i >= 0)
                if (vcrypto->data_vq[i].engine)
                        crypto_engine_exit(vcrypto->data_vq[i].engine);

        return ret;
}

static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
        u32 i;

        for (i = 0; i < vcrypto->max_data_queues; i++)
                if (vcrypto->data_vq[i].engine)
                        crypto_engine_exit(vcrypto->data_vq[i].engine);
}

static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
        struct virtio_device *vdev = vcrypto->vdev;

        virtcrypto_clean_affinity(vcrypto, -1);

        vdev->config->del_vqs(vdev);

        virtcrypto_free_queues(vcrypto);
}

static void vcrypto_config_changed_work(struct work_struct *work)
{
        struct virtio_crypto *vcrypto =
                container_of(work, struct virtio_crypto, config_work);

        virtcrypto_update_status(vcrypto);
}
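
/*
 * Probe: validate transport and NUMA placement, read the device
 * configuration space, register the device, set up virtqueues and
 * crypto engines, then mark the device ready.
 */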
static int virtcrypto_probe(struct virtio_device *vdev)
{
        int err = -EFAULT;
        struct virtio_crypto *vcrypto;
        u32 max_data_queues = 0, max_cipher_key_len = 0;
        u32 max_auth_key_len = 0;
        u64 max_size = 0;
        u32 cipher_algo_l = 0;
        u32 cipher_algo_h = 0;
        u32 hash_algo = 0;
        u32 mac_algo_l = 0;
        u32 mac_algo_h = 0;
        u32 aead_algo = 0;
        u32 akcipher_algo = 0;
        u32 crypto_services = 0;

        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                return -ENODEV;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
                /*
                 * If the accelerator is connected to a node with no memory
                 * there is no point in using the accelerator since the remote
                 * memory transaction will be very slow.
                 */
                dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
                return -EINVAL;
        }

        vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
                               dev_to_node(&vdev->dev));
        if (!vcrypto)
                return -ENOMEM;

        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_dataqueues, &max_data_queues);
        if (max_data_queues < 1)
                max_data_queues = 1;

        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_cipher_key_len, &max_cipher_key_len);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_auth_key_len, &max_auth_key_len);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_size, &max_size);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        crypto_services, &crypto_services);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        cipher_algo_l, &cipher_algo_l);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        cipher_algo_h, &cipher_algo_h);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        hash_algo, &hash_algo);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        mac_algo_l, &mac_algo_l);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        mac_algo_h, &mac_algo_h);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        aead_algo, &aead_algo);
        if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
                virtio_cread_le(vdev, struct virtio_crypto_config,
                                akcipher_algo, &akcipher_algo);

        /* Add virtio crypto device to global table */
        err = virtcrypto_devmgr_add_dev(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
                goto free;
        }
        vcrypto->owner = THIS_MODULE;
        vdev->priv = vcrypto;
        vcrypto->vdev = vdev;

        spin_lock_init(&vcrypto->ctrl_lock);

        /* Use single data queue as default */
        vcrypto->curr_queue = 1;
        vcrypto->max_data_queues = max_data_queues;
        vcrypto->max_cipher_key_len = max_cipher_key_len;
        vcrypto->max_auth_key_len = max_auth_key_len;
        vcrypto->max_size = max_size;
        vcrypto->crypto_services = crypto_services;
        vcrypto->cipher_algo_l = cipher_algo_l;
        vcrypto->cipher_algo_h = cipher_algo_h;
        vcrypto->mac_algo_l = mac_algo_l;
        vcrypto->mac_algo_h = mac_algo_h;
        vcrypto->hash_algo = hash_algo;
        vcrypto->aead_algo = aead_algo;
        vcrypto->akcipher_algo = akcipher_algo;

        dev_info(&vdev->dev,
                 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
                 vcrypto->max_data_queues,
                 vcrypto->max_cipher_key_len,
                 vcrypto->max_auth_key_len,
                 vcrypto->max_size);

        err = virtcrypto_init_vqs(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to initialize vqs.\n");
                goto free_dev;
        }

        err = virtcrypto_start_crypto_engines(vcrypto);
        if (err)
                goto free_vqs;

        virtio_device_ready(vdev);

        err = virtcrypto_update_status(vcrypto);
        if (err)
                goto free_engines;

        INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);

        return 0;

free_engines:
        virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
        virtio_reset_device(vdev);
        virtcrypto_del_vqs(vcrypto);
free_dev:
        virtcrypto_devmgr_rm_dev(vcrypto);
free:
        kfree(vcrypto);
        return err;
}
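
/*
 * Reclaim requests that were still pending in the data queues when the
 * device was reset and will therefore never be completed.
 */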
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
        struct virtio_crypto_request *vc_req;
        int i;
        struct virtqueue *vq;

        for (i = 0; i < vcrypto->max_data_queues; i++) {
                vq = vcrypto->data_vq[i].vq;
                while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
                        kfree(vc_req->req_data);
                        kfree(vc_req->sgs);
                }
                cond_resched();
        }
}

static void virtcrypto_remove(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

        flush_work(&vcrypto->config_work);
        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);
        virtio_reset_device(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        virtcrypto_clear_crypto_engines(vcrypto);
        virtcrypto_del_vqs(vcrypto);
        virtcrypto_devmgr_rm_dev(vcrypto);
        kfree(vcrypto);
}

static void virtcrypto_config_changed(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        schedule_work(&vcrypto->config_work);
}
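
/*
 * Suspend/resume: freeze tears everything down after resetting the
 * device; restore rebuilds the virtqueues and engines and restarts the
 * device.
 */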
#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        flush_work(&vcrypto->config_work);
        virtio_reset_device(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);

        virtcrypto_clear_crypto_engines(vcrypto);
        virtcrypto_del_vqs(vcrypto);
        return 0;
}

static int virtcrypto_restore(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;
        int err;

        err = virtcrypto_init_vqs(vcrypto);
        if (err)
                return err;

        err = virtcrypto_start_crypto_engines(vcrypto);
        if (err)
                goto free_vqs;

        virtio_device_ready(vdev);

        err = virtcrypto_dev_start(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
                goto free_engines;
        }

        return 0;

free_engines:
        virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
        virtio_reset_device(vdev);
        virtcrypto_del_vqs(vcrypto);
        return err;
}
#endif

static const unsigned int features[] = {
        /* none */
};

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static struct virtio_driver virtio_crypto_driver = {
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .id_table = id_table,
        .probe = virtcrypto_probe,
        .remove = virtcrypto_remove,
        .config_changed = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtcrypto_freeze,
        .restore = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");