// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/engine.h>
#include <crypto/kpp.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

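/*
 * Editorial note (illustrative sketch, not part of the original file):
 * the software queue is bounded by the qlen passed to crypto_init_queue()
 * (CRYPTO_ENGINE_MAX_QLEN by default).  When the queue is full,
 * crypto_enqueue_request() returns -ENOSPC unless the request carries
 * CRYPTO_TFM_REQ_MAY_BACKLOG, in which case it is still queued and -EBUSY
 * is returned.  A hypothetical caller would handle the result like this:
 *
 *	err = crypto_transfer_skcipher_request_to_engine(engine, req);
 *	switch (err) {
 *	case -EINPROGRESS:	// queued; completion callback will run
 *	case -EBUSY:		// queued as backlog; the callback gets
 *				// -EINPROGRESS once space frees up
 *		return err;
 *	default:		// e.g. -ENOSPC or -ESHUTDOWN
 *		return err;
 *	}
 */
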
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
{
        unsigned long flags;

        /*
         * If the hardware cannot enqueue more requests
         * and the retry mechanism is not supported,
         * make sure we are completing the current request.
         */
        if (!engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                if (engine->cur_req == req)
                        engine->cur_req = NULL;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        lockdep_assert_in_softirq();
        crypto_request_complete(req, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        bool was_busy = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (!engine->retry_support && engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

start_request:
        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        /*
         * If hardware doesn't support the retry mechanism,
         * keep track of the request we are processing now.
         * We'll need it on completion (crypto_finalize_request).
         */
        if (!engine->retry_support)
                engine->cur_req = async_req;

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* By now we have successfully dequeued a request to process */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
                        goto req_err_1;
                }
        }

        enginectx = crypto_tfm_ctx(async_req->tfm);

        if (!enginectx->op.do_one_request) {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err_1;
        }

        ret = enginectx->op.do_one_request(engine, async_req);

        /* Request unsuccessfully executed by hardware */
        if (ret < 0) {
                /*
                 * If hardware queue is full (-ENOSPC), requeue request
                 * regardless of backlog flag.
                 * Otherwise, unprepare and complete the request.
                 */
                if (!engine->retry_support ||
                    (ret != -ENOSPC)) {
                        dev_err(engine->dev,
                                "Failed to do one request from queue: %d\n",
                                ret);
                        goto req_err_1;
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                /*
                 * If hardware was unable to execute request, enqueue it
                 * back in front of crypto-engine queue, to keep the order
                 * of requests.
                 */
                crypto_enqueue_request_head(&engine->queue, async_req);

                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        goto retry;

req_err_1:
        crypto_request_complete(async_req, ret);

retry:
        if (backlog)
                crypto_request_complete(backlog, -EINPROGRESS);

        /* If retry mechanism is supported, send new requests to engine */
        if (engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                goto start_request;
        }
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /*
         * Batching requests is possible only if
         * the hardware can enqueue multiple requests.
         */
        if (engine->do_batch_requests) {
                ret = engine->do_batch_requests(engine);
                if (ret)
                        dev_err(engine->dev, "failed to do batch requests: %d\n",
                                ret);
        }

        return;
}

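/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a driver plugs into the pump above by embedding struct crypto_engine_ctx
 * first in its transform context and filling in op.do_one_request.  The
 * names my_dev, my_tfm_ctx, my_hw_queue_full, my_hw_submit and
 * my_do_one_request are hypothetical.
 *
 *	struct my_tfm_ctx {
 *		struct crypto_engine_ctx enginectx;	// must come first
 *		struct my_dev *dd;
 *	};
 *
 *	static int my_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req = skcipher_request_cast(areq);
 *		struct my_tfm_ctx *ctx =
 *			crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 *
 *		// On a retry-capable engine, -ENOSPC makes the pump requeue
 *		// the request at the head of the queue instead of failing it.
 *		if (my_hw_queue_full(ctx->dd))
 *			return -ENOSPC;
 *
 *		return my_hw_submit(ctx->dd, req);	// 0: accepted by hw
 *	}
 *
 *	// in the tfm init callback:
 *	//	ctx->enginectx.op.do_one_request = my_do_one_request;
 */
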
static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 * @need_pump: whether to queue a pump run on the kthread worker
 */
static int crypto_transfer_request(struct crypto_engine *engine,
                                   struct crypto_async_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = crypto_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                             struct crypto_async_request *req)
{
        return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                           struct aead_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
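
/*
 * Illustrative sketch (editorial addition): a driver's algorithm entry
 * point typically just hands the request over to the engine.  The names
 * my_aead_encrypt, my_dev and my_dev_from_req are hypothetical; the same
 * pattern applies to the other transfer helpers below.
 *
 *	static int my_aead_encrypt(struct aead_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_req(req);
 *
 *		// Returns -EINPROGRESS (or -EBUSY if backlogged); the
 *		// actual crypto runs later via op.do_one_request().
 *		return crypto_transfer_aead_request_to_engine(dd->engine, req);
 *	}
 */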

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                               struct akcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
                                          struct kpp_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                               struct skcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
                                  struct aead_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                      struct akcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
                                 struct kpp_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                      struct skcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
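
/*
 * Illustrative sketch (editorial addition): completion is reported from the
 * driver's bottom half once the hardware is done.  Note the
 * lockdep_assert_in_softirq() in crypto_finalize_request() above: this is
 * expected to run in softirq (e.g. tasklet) context.  my_dev,
 * my_done_tasklet and my_hw_read_status are hypothetical names.
 *
 *	static void my_done_tasklet(unsigned long data)
 *	{
 *		struct my_dev *dd = (struct my_dev *)data;
 *		int err = my_hw_read_status(dd);	// 0 or -errno
 *
 *		// Runs req->base.complete() and kicks the pump so the
 *		// next queued request gets dispatched.
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *	}
 */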

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, wait
         * for a while so the queued requests can be pumped out.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                dev_warn(engine->dev, "could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether the hardware supports the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                       bool retry_support,
                                                       int (*cbk_do_batch)(struct crypto_engine *engine),
                                                       bool rt, int qlen)
{
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->dev = dev;
        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->retry_support = retry_support;
        engine->priv_data = dev;
        /*
         * Batching requests is possible only if
         * the hardware supports the retry mechanism.
         */
        engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, qlen);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_set_fifo(engine->kworker->task);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
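
/*
 * Illustrative sketch (editorial addition): a driver whose hardware can
 * accept several requests at once might enable the retry mechanism and
 * provide a batch callback that rings the doorbell once per batch.
 * my_probe, my_kick_batch, my_doorbell and MY_HW_QLEN are hypothetical.
 *
 *	static int my_kick_batch(struct crypto_engine *engine)
 *	{
 *		struct my_dev *dd = dev_get_drvdata(engine->dev);
 *
 *		return my_doorbell(dd);	// submit everything queued so far
 *	}
 *
 *	// in my_probe():
 *	//	dd->engine = crypto_engine_alloc_init_and_set(dev, true,
 *	//						      my_kick_batch,
 *	//						      false, MY_HW_QLEN);
 */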

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
                                                CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
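
/*
 * Illustrative sketch (editorial addition): the typical engine lifecycle
 * in a driver's probe/remove path; my_probe and my_remove are hypothetical.
 *
 *	// in my_probe():
 *	dd->engine = crypto_engine_alloc_init(dev, true);
 *	if (!dd->engine)
 *		return -ENOMEM;
 *	ret = crypto_engine_start(dd->engine);
 *	if (ret) {
 *		crypto_engine_exit(dd->engine);
 *		return ret;
 *	}
 *
 *	// in my_remove():
 *	crypto_engine_exit(dd->engine);
 */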

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");