/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define CRYPTD_MAX_QLEN 100

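/*
 * State shared by the cryptd kthread and all cryptd instances: a
 * fixed-length request queue, with @lock guarding enqueue/dequeue
 * and @mutex serialising queue processing against tfm teardown.
 */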
struct cryptd_state {
	spinlock_t lock;
	struct mutex mutex;
	struct crypto_queue queue;
	struct task_struct *task;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_state *state;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_hash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
};

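/*
 * Recover the shared daemon state from a tfm by walking back to the
 * template instance the tfm was created from.
 */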
static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->state;
}

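/*
 * setkey simply forwards to the underlying synchronous blkcipher,
 * copying the request flags down and the result flags back so the
 * caller sees exactly what the child reported.
 */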
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

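/*
 * Run one queued request on the daemon thread by calling the child
 * blkcipher synchronously.  An err of -EINPROGRESS means this call is
 * only the backlog notification, so it is passed straight through to
 * the original completion.  The completion itself is invoked with
 * softirqs disabled to match the context completions normally run in.
 */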
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

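/*
 * Queue a request for the daemon thread.  The caller's completion is
 * stashed in the request context and replaced with the cryptd callback
 * that will do the actual work; the thread is then woken.
 */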
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_state *state =
		cryptd_get_state(crypto_ablkcipher_tfm(tfm));
	int err;

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	spin_lock_bh(&state->lock);
	err = ablkcipher_enqueue_request(&state->queue, req);
	spin_unlock_bh(&state->lock);

	wake_up_process(state->task);
	return err;
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_state *state = cryptd_get_state(tfm);
	int active;

	mutex_lock(&state->mutex);
	active = ablkcipher_tfm_in_queue(&state->queue,
					 __crypto_ablkcipher_cast(tfm));
	mutex_unlock(&state->mutex);

	BUG_ON(active);

	crypto_free_blkcipher(ctx->child);
}

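/*
 * Allocate the common part of a cryptd instance: the instance itself,
 * a "cryptd(...)" driver name, a spawn holding a reference on the
 * underlying algorithm, and a priority bumped by 50 so the async
 * wrapper is preferred over the wrapped algorithm in lookups.
 */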
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
						     struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct cryptd_instance_ctx *ctx;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst) {
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	ctx = crypto_instance_ctx(inst);
	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	ctx->state = state;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}

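/*
 * Build a cryptd instance around a synchronous blkcipher: the cipher
 * parameters are copied from the child while the entry points are
 * replaced with the enqueueing wrappers above.
 */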
static struct crypto_instance *cryptd_alloc_blkcipher(
	struct rtattr **tb, struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = cryptd_alloc_instance(alg, state);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_hash *cipher;

	cipher = crypto_spawn_hash(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ahash.reqsize =
		sizeof(struct cryptd_hash_request_ctx);
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_state *state = cryptd_get_state(tfm);
	int active;

	mutex_lock(&state->mutex);
	active = ahash_tfm_in_queue(&state->queue,
				    __crypto_ahash_cast(tfm));
	mutex_unlock(&state->mutex);

	BUG_ON(active);

	crypto_free_hash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_hash *child = ctx->child;
	int err;

	crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
				     CRYPTO_TFM_REQ_MASK);
	err = crypto_hash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

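/*
 * The hash path mirrors the blkcipher one: each ahash entry point
 * queues the request with a callback that performs the corresponding
 * synchronous hash operation on the daemon thread.
 */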
static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_state *state =
		cryptd_get_state(crypto_ahash_tfm(tfm));
	int err;

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	spin_lock_bh(&state->lock);
	err = ahash_enqueue_request(&state->queue, req);
	spin_unlock_bh(&state->lock);

	wake_up_process(state->task);
	return err;
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->init(&desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->update(&desc,
					     req->src,
					     req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->final(&desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->digest(&desc,
					     req->src,
					     req->nbytes,
					     req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

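/*
 * Build a cryptd instance around a synchronous hash, exposing it
 * through the asynchronous ahash interface.
 */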
static struct crypto_instance *cryptd_alloc_hash(
	struct rtattr **tb, struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
				  CRYPTO_ALG_TYPE_HASH_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = cryptd_alloc_instance(alg, state);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ahash_type;

	inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.cra_init = cryptd_hash_init_tfm;
	inst->alg.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
	inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
	inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
	inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
	inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static struct cryptd_state state;

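/*
 * Template entry point: dispatch on the requested algorithm type and
 * build the matching cryptd instance around the global daemon state.
 */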
static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_alloc_blkcipher(tb, &state);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_alloc_hash(tb, &state);
	}

	return ERR_PTR(-EINVAL);
}

525 | ||
526 | static void cryptd_free(struct crypto_instance *inst) | |
527 | { | |
528 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); | |
529 | ||
530 | crypto_drop_spawn(&ctx->spawn); | |
531 | kfree(inst); | |
532 | } | |
533 | ||
534 | static struct crypto_template cryptd_tmpl = { | |
535 | .name = "cryptd", | |
536 | .alloc = cryptd_alloc, | |
537 | .free = cryptd_free, | |
538 | .module = THIS_MODULE, | |
539 | }; | |
540 | ||
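/*
 * Helper API for kernel users that want a cryptd-wrapped ablkcipher
 * directly.  The cra_module check below ensures the lookup really
 * resolved to a cryptd instance rather than some other algorithm that
 * happened to match the "cryptd(...)" name.
 *
 * Minimal usage sketch (the algorithm name "cbc(aes)" is only an
 * example):
 *
 *	struct cryptd_ablkcipher *tfm;
 *
 *	tfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	cryptd_free_ablkcipher(tfm);
 */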
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ablkcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ablkcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

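/*
 * Daemon thread lifetime: the queue and its locks must be ready before
 * the kthread runs, and the BUG_ON() in cryptd_stop_thread() insists
 * the queue has drained before the thread is stopped.
 */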
static inline int cryptd_create_thread(struct cryptd_state *state,
				       int (*fn)(void *data), const char *name)
{
	spin_lock_init(&state->lock);
	mutex_init(&state->mutex);
	crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);

	state->task = kthread_run(fn, state, name);
	if (IS_ERR(state->task))
		return PTR_ERR(state->task);

	return 0;
}

static inline void cryptd_stop_thread(struct cryptd_state *state)
{
	BUG_ON(state->queue.qlen);
	kthread_stop(state->task);
}

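/*
 * Main loop: dequeue one request at a time under the mutex, notify the
 * owner of any backlogged request (-EINPROGRESS) that it has entered
 * the queue proper, then invoke the request's cryptd completion to do
 * the actual work.  With nothing queued the thread sleeps until the
 * next enqueue or kthread_stop() wakes it.
 */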
static int cryptd_thread(void *data)
{
	struct cryptd_state *state = data;
	int stop;

	current->flags |= PF_NOFREEZE;

	do {
		struct crypto_async_request *req, *backlog;

		mutex_lock(&state->mutex);
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_bh(&state->lock);
		backlog = crypto_get_backlog(&state->queue);
		req = crypto_dequeue_request(&state->queue);
		spin_unlock_bh(&state->lock);

		stop = kthread_should_stop();

		if (stop || req) {
			__set_current_state(TASK_RUNNING);
			if (req) {
				if (backlog)
					backlog->complete(backlog,
							  -EINPROGRESS);
				req->complete(req, 0);
			}
		}

		mutex_unlock(&state->mutex);

		schedule();
	} while (!stop);

	return 0;
}

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		kthread_stop(state.task);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_stop_thread(&state);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");