Commit | Line | Data |
---|---|---|
1c93d9cf EP |
1 | /* |
2 | * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> | |
3 | * All rights reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation; either version 2 of the License, or | |
8 | * (at your option) any later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | */ | |
15 | ||
16 | #include <linux/bio.h> | |
17 | #include <linux/crypto.h> | |
18 | #include <linux/dst.h> | |
19 | #include <linux/kernel.h> | |
20 | #include <linux/scatterlist.h> | |
21 | #include <linux/slab.h> | |
22 | ||
23 | /* | |
24 | * Tricky bastard, but IV can be more complex with time... | |
25 | */ | |
26 | static inline u64 dst_gen_iv(struct dst_trans *t) | |
27 | { | |
28 | return t->gen; | |
29 | } | |
30 | ||
31 | /* | |
32 | * Crypto machinery: hash/cipher support for the given crypto controls. | |
33 | */ | |
34 | static struct crypto_hash *dst_init_hash(struct dst_crypto_ctl *ctl, u8 *key) | |
35 | { | |
36 | int err; | |
37 | struct crypto_hash *hash; | |
38 | ||
39 | hash = crypto_alloc_hash(ctl->hash_algo, 0, CRYPTO_ALG_ASYNC); | |
40 | if (IS_ERR(hash)) { | |
41 | err = PTR_ERR(hash); | |
42 | dprintk("%s: failed to allocate hash '%s', err: %d.\n", | |
43 | __func__, ctl->hash_algo, err); | |
44 | goto err_out_exit; | |
45 | } | |
46 | ||
47 | ctl->crypto_attached_size = crypto_hash_digestsize(hash); | |
48 | ||
49 | if (!ctl->hash_keysize) | |
50 | return hash; | |
51 | ||
52 | err = crypto_hash_setkey(hash, key, ctl->hash_keysize); | |
53 | if (err) { | |
54 | dprintk("%s: failed to set key for hash '%s', err: %d.\n", | |
55 | __func__, ctl->hash_algo, err); | |
56 | goto err_out_free; | |
57 | } | |
58 | ||
59 | return hash; | |
60 | ||
61 | err_out_free: | |
62 | crypto_free_hash(hash); | |
63 | err_out_exit: | |
64 | return ERR_PTR(err); | |
65 | } | |
66 | ||
d52ac3f2 MZ |
67 | static struct crypto_ablkcipher *dst_init_cipher(struct dst_crypto_ctl *ctl, |
68 | u8 *key) | |
1c93d9cf EP |
69 | { |
70 | int err = -EINVAL; | |
71 | struct crypto_ablkcipher *cipher; | |
72 | ||
73 | if (!ctl->cipher_keysize) | |
74 | goto err_out_exit; | |
75 | ||
76 | cipher = crypto_alloc_ablkcipher(ctl->cipher_algo, 0, 0); | |
77 | if (IS_ERR(cipher)) { | |
78 | err = PTR_ERR(cipher); | |
79 | dprintk("%s: failed to allocate cipher '%s', err: %d.\n", | |
80 | __func__, ctl->cipher_algo, err); | |
81 | goto err_out_exit; | |
82 | } | |
83 | ||
84 | crypto_ablkcipher_clear_flags(cipher, ~0); | |
85 | ||
86 | err = crypto_ablkcipher_setkey(cipher, key, ctl->cipher_keysize); | |
87 | if (err) { | |
88 | dprintk("%s: failed to set key for cipher '%s', err: %d.\n", | |
89 | __func__, ctl->cipher_algo, err); | |
90 | goto err_out_free; | |
91 | } | |
92 | ||
93 | return cipher; | |
94 | ||
95 | err_out_free: | |
96 | crypto_free_ablkcipher(cipher); | |
97 | err_out_exit: | |
98 | return ERR_PTR(err); | |
99 | } | |
100 | ||
101 | /* | |
102 | * Crypto engine has a pool of pages to encrypt data into before sending | |
103 | * it over the network. This pool is freed/allocated here. | |
104 | */ | |
105 | static void dst_crypto_pages_free(struct dst_crypto_engine *e) | |
106 | { | |
107 | unsigned int i; | |
108 | ||
d52ac3f2 | 109 | for (i = 0; i < e->page_num; ++i) |
1c93d9cf EP |
110 | __free_page(e->pages[i]); |
111 | kfree(e->pages); | |
112 | } | |
113 | ||
114 | static int dst_crypto_pages_alloc(struct dst_crypto_engine *e, int num) | |
115 | { | |
116 | int i; | |
117 | ||
118 | e->pages = kmalloc(num * sizeof(struct page **), GFP_KERNEL); | |
119 | if (!e->pages) | |
120 | return -ENOMEM; | |
121 | ||
d52ac3f2 | 122 | for (i = 0; i < num; ++i) { |
1c93d9cf EP |
123 | e->pages[i] = alloc_page(GFP_KERNEL); |
124 | if (!e->pages[i]) | |
125 | goto err_out_free_pages; | |
126 | } | |
127 | ||
128 | e->page_num = num; | |
129 | return 0; | |
130 | ||
131 | err_out_free_pages: | |
132 | while (--i >= 0) | |
133 | __free_page(e->pages[i]); | |
134 | ||
135 | kfree(e->pages); | |
136 | return -ENOMEM; | |
137 | } | |
138 | ||
/*
 * Initialize crypto engine for given node.
 * Setup cipher/hash, keys, pool of threads and private data.
 *
 * Returns 0 on success or a negative error; on failure everything
 * allocated so far is released via the goto chain below.
 */
static int dst_crypto_engine_init(struct dst_crypto_engine *e,
		struct dst_node *n)
{
	int err;
	struct dst_crypto_ctl *ctl = &n->crypto;

	/* Temporary pages the data is encrypted into before being sent. */
	err = dst_crypto_pages_alloc(e, n->max_pages);
	if (err)
		goto err_out_exit;

	/* One page of scratch space, reused as ablkcipher request/hash desc. */
	e->size = PAGE_SIZE;
	e->data = kmalloc(e->size, GFP_KERNEL);
	if (!e->data) {
		err = -ENOMEM;
		goto err_out_free_pages;
	}

	/* An empty algorithm name means the feature is disabled. */
	if (ctl->hash_algo[0]) {
		e->hash = dst_init_hash(ctl, n->hash_key);
		if (IS_ERR(e->hash)) {
			err = PTR_ERR(e->hash);
			e->hash = NULL;	/* keep cleanup paths NULL-safe */
			goto err_out_free;
		}
	}

	if (ctl->cipher_algo[0]) {
		e->cipher = dst_init_cipher(ctl, n->cipher_key);
		if (IS_ERR(e->cipher)) {
			err = PTR_ERR(e->cipher);
			e->cipher = NULL;
			goto err_out_free_hash;
		}
	}

	return 0;

err_out_free_hash:
	/*
	 * NOTE(review): if a cipher was requested without a hash, e->hash is
	 * NULL here — this relies on crypto_free_hash() tolerating NULL;
	 * confirm against the crypto API of this kernel tree.
	 */
	crypto_free_hash(e->hash);
err_out_free:
	kfree(e->data);
err_out_free_pages:
	dst_crypto_pages_free(e);
err_out_exit:
	return err;
}
189 | ||
190 | static void dst_crypto_engine_exit(struct dst_crypto_engine *e) | |
191 | { | |
192 | if (e->hash) | |
193 | crypto_free_hash(e->hash); | |
194 | if (e->cipher) | |
195 | crypto_free_ablkcipher(e->cipher); | |
196 | dst_crypto_pages_free(e); | |
197 | kfree(e->data); | |
198 | } | |
199 | ||
/*
 * Waiting for cipher processing to be completed.
 * Shared between the submitter (which sleeps on 'complete') and the
 * async callback (which stores the result in 'error' and wakes it).
 */
struct dst_crypto_completion {
	struct completion complete;	/* fired by dst_crypto_complete() */
	int error;			/* result reported by the crypto layer */
};
207 | ||
208 | static void dst_crypto_complete(struct crypto_async_request *req, int err) | |
209 | { | |
210 | struct dst_crypto_completion *c = req->data; | |
211 | ||
212 | if (err == -EINPROGRESS) | |
213 | return; | |
214 | ||
215 | dprintk("%s: req: %p, err: %d.\n", __func__, req, err); | |
216 | c->error = err; | |
217 | complete(&c->complete); | |
218 | } | |
219 | ||
220 | static int dst_crypto_process(struct ablkcipher_request *req, | |
221 | struct scatterlist *sg_dst, struct scatterlist *sg_src, | |
222 | void *iv, int enc, unsigned long timeout) | |
223 | { | |
224 | struct dst_crypto_completion c; | |
225 | int err; | |
226 | ||
227 | init_completion(&c.complete); | |
228 | c.error = -EINPROGRESS; | |
229 | ||
230 | ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | |
231 | dst_crypto_complete, &c); | |
232 | ||
233 | ablkcipher_request_set_crypt(req, sg_src, sg_dst, sg_src->length, iv); | |
234 | ||
235 | if (enc) | |
236 | err = crypto_ablkcipher_encrypt(req); | |
237 | else | |
238 | err = crypto_ablkcipher_decrypt(req); | |
239 | ||
240 | switch (err) { | |
d52ac3f2 MZ |
241 | case -EINPROGRESS: |
242 | case -EBUSY: | |
243 | err = wait_for_completion_interruptible_timeout(&c.complete, | |
244 | timeout); | |
245 | if (!err) | |
246 | err = -ETIMEDOUT; | |
247 | else | |
248 | err = c.error; | |
249 | break; | |
250 | default: | |
251 | break; | |
1c93d9cf EP |
252 | } |
253 | ||
254 | return err; | |
255 | } | |
256 | ||
257 | /* | |
258 | * DST uses generic iteration approach for data crypto processing. | |
259 | * Single block IO request is switched into array of scatterlists, | |
260 | * which are submitted to the crypto processing iterator. | |
261 | * | |
262 | * Input and output iterator initialization are different, since | |
263 | * in output case we can not encrypt data in-place and need a | |
264 | * temporary storage, which is then being sent to the remote peer. | |
265 | */ | |
266 | static int dst_trans_iter_out(struct bio *bio, struct dst_crypto_engine *e, | |
d52ac3f2 | 267 | int (*iterator) (struct dst_crypto_engine *e, |
1c93d9cf EP |
268 | struct scatterlist *dst, |
269 | struct scatterlist *src)) | |
270 | { | |
271 | struct bio_vec *bv; | |
272 | int err, i; | |
273 | ||
274 | sg_init_table(e->src, bio->bi_vcnt); | |
275 | sg_init_table(e->dst, bio->bi_vcnt); | |
276 | ||
277 | bio_for_each_segment(bv, bio, i) { | |
278 | sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset); | |
279 | sg_set_page(&e->dst[i], e->pages[i], bv->bv_len, bv->bv_offset); | |
280 | ||
281 | err = iterator(e, &e->dst[i], &e->src[i]); | |
282 | if (err) | |
283 | return err; | |
284 | } | |
285 | ||
286 | return 0; | |
287 | } | |
288 | ||
289 | static int dst_trans_iter_in(struct bio *bio, struct dst_crypto_engine *e, | |
d52ac3f2 | 290 | int (*iterator) (struct dst_crypto_engine *e, |
1c93d9cf EP |
291 | struct scatterlist *dst, |
292 | struct scatterlist *src)) | |
293 | { | |
294 | struct bio_vec *bv; | |
295 | int err, i; | |
296 | ||
297 | sg_init_table(e->src, bio->bi_vcnt); | |
298 | sg_init_table(e->dst, bio->bi_vcnt); | |
299 | ||
300 | bio_for_each_segment(bv, bio, i) { | |
301 | sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset); | |
302 | sg_set_page(&e->dst[i], bv->bv_page, bv->bv_len, bv->bv_offset); | |
303 | ||
304 | err = iterator(e, &e->dst[i], &e->src[i]); | |
305 | if (err) | |
306 | return err; | |
307 | } | |
308 | ||
309 | return 0; | |
310 | } | |
311 | ||
312 | static int dst_crypt_iterator(struct dst_crypto_engine *e, | |
313 | struct scatterlist *sg_dst, struct scatterlist *sg_src) | |
314 | { | |
315 | struct ablkcipher_request *req = e->data; | |
316 | u8 iv[32]; | |
317 | ||
318 | memset(iv, 0, sizeof(iv)); | |
319 | ||
320 | memcpy(iv, &e->iv, sizeof(e->iv)); | |
321 | ||
322 | return dst_crypto_process(req, sg_dst, sg_src, iv, e->enc, e->timeout); | |
323 | } | |
324 | ||
325 | static int dst_crypt(struct dst_crypto_engine *e, struct bio *bio) | |
326 | { | |
327 | struct ablkcipher_request *req = e->data; | |
328 | ||
329 | memset(req, 0, sizeof(struct ablkcipher_request)); | |
330 | ablkcipher_request_set_tfm(req, e->cipher); | |
331 | ||
332 | if (e->enc) | |
333 | return dst_trans_iter_out(bio, e, dst_crypt_iterator); | |
334 | else | |
335 | return dst_trans_iter_in(bio, e, dst_crypt_iterator); | |
336 | } | |
337 | ||
/*
 * Fold one source scatterlist entry into the running hash state
 * (e->data holds the struct hash_desc set up by dst_hash()).
 * The destination scatterlist is unused for hashing.
 */
static int dst_hash_iterator(struct dst_crypto_engine *e,
		struct scatterlist *sg_dst, struct scatterlist *sg_src)
{
	return crypto_hash_update(e->data, sg_src, sg_src->length);
}
343 | ||
344 | static int dst_hash(struct dst_crypto_engine *e, struct bio *bio, void *dst) | |
345 | { | |
346 | struct hash_desc *desc = e->data; | |
347 | int err; | |
348 | ||
349 | desc->tfm = e->hash; | |
350 | desc->flags = 0; | |
351 | ||
352 | err = crypto_hash_init(desc); | |
353 | if (err) | |
354 | return err; | |
355 | ||
356 | err = dst_trans_iter_in(bio, e, dst_hash_iterator); | |
357 | if (err) | |
358 | return err; | |
359 | ||
360 | err = crypto_hash_final(desc, dst); | |
361 | if (err) | |
362 | return err; | |
363 | ||
364 | return 0; | |
365 | } | |
366 | ||
367 | /* | |
368 | * Initialize/cleanup a crypto thread. The only thing it should | |
369 | * do is to allocate a pool of pages as temporary storage. | |
370 | * And to setup cipher and/or hash. | |
371 | */ | |
372 | static void *dst_crypto_thread_init(void *data) | |
373 | { | |
374 | struct dst_node *n = data; | |
375 | struct dst_crypto_engine *e; | |
376 | int err = -ENOMEM; | |
377 | ||
378 | e = kzalloc(sizeof(struct dst_crypto_engine), GFP_KERNEL); | |
379 | if (!e) | |
380 | goto err_out_exit; | |
381 | e->src = kcalloc(2 * n->max_pages, sizeof(struct scatterlist), | |
382 | GFP_KERNEL); | |
383 | if (!e->src) | |
384 | goto err_out_free; | |
385 | ||
386 | e->dst = e->src + n->max_pages; | |
387 | ||
388 | err = dst_crypto_engine_init(e, n); | |
389 | if (err) | |
390 | goto err_out_free_all; | |
391 | ||
392 | return e; | |
393 | ||
394 | err_out_free_all: | |
395 | kfree(e->src); | |
396 | err_out_free: | |
397 | kfree(e); | |
398 | err_out_exit: | |
399 | return ERR_PTR(err); | |
400 | } | |
401 | ||
402 | static void dst_crypto_thread_cleanup(void *private) | |
403 | { | |
404 | struct dst_crypto_engine *e = private; | |
405 | ||
406 | dst_crypto_engine_exit(e); | |
407 | kfree(e->src); | |
408 | kfree(e); | |
409 | } | |
410 | ||
/*
 * Initialize crypto engine for given node: store keys, create pool
 * of threads, initialize each one.
 *
 * Each thread has unique ID, but 0 and 1 are reserved for receiving and
 * accepting threads (if export node), so IDs could start from 2, but starting
 * them from 10 allows easily understand what this thread is for.
 */
int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl)
{
	/*
	 * Key material is transferred immediately after the control block.
	 * NOTE(review): both hash and cipher keys are copied from the same
	 * offset 'key'; confirm against the userspace ABI whether the two
	 * keys were meant to be laid out back to back instead.
	 */
	void *key = (ctl + 1);
	int err = -ENOMEM, i;
	char name[32];

	if (ctl->hash_keysize) {
		n->hash_key = kmalloc(ctl->hash_keysize, GFP_KERNEL);
		if (!n->hash_key)
			goto err_out_exit;
		memcpy(n->hash_key, key, ctl->hash_keysize);
	}

	if (ctl->cipher_keysize) {
		n->cipher_key = kmalloc(ctl->cipher_keysize, GFP_KERNEL);
		if (!n->cipher_key)
			goto err_out_free_hash;
		memcpy(n->cipher_key, key, ctl->cipher_keysize);
	}
	/* The node keeps its own copy of the control block. */
	memcpy(&n->crypto, ctl, sizeof(struct dst_crypto_ctl));

	for (i = 0; i < ctl->thread_num; ++i) {
		snprintf(name, sizeof(name), "%s-crypto-%d", n->name, i);
		/* Unique ids... */
		err = thread_pool_add_worker(n->pool, name, i + 10,
				dst_crypto_thread_init, dst_crypto_thread_cleanup, n);
		if (err)
			goto err_out_free_threads;
	}

	return 0;

err_out_free_threads:
	/* Roll back the workers that were already added (ids i-1 .. 0). */
	while (--i >= 0)
		thread_pool_del_worker_id(n->pool, i+10);

	/* Zeroing the keysizes prevents later double-free attempts. */
	if (ctl->cipher_keysize)
		kfree(n->cipher_key);
	ctl->cipher_keysize = 0;
err_out_free_hash:
	if (ctl->hash_keysize)
		kfree(n->hash_key);
	ctl->hash_keysize = 0;
err_out_exit:
	return err;
}
465 | ||
466 | void dst_node_crypto_exit(struct dst_node *n) | |
467 | { | |
468 | struct dst_crypto_ctl *ctl = &n->crypto; | |
469 | ||
470 | if (ctl->cipher_algo[0] || ctl->hash_algo[0]) { | |
471 | kfree(n->hash_key); | |
472 | kfree(n->cipher_key); | |
473 | } | |
474 | } | |
475 | ||
476 | /* | |
477 | * Thrad pool setup callback. Just stores a transaction in private data. | |
478 | */ | |
479 | static int dst_trans_crypto_setup(void *crypto_engine, void *trans) | |
480 | { | |
481 | struct dst_crypto_engine *e = crypto_engine; | |
482 | ||
483 | e->private = trans; | |
484 | return 0; | |
485 | } | |
486 | ||
#if 0
/*
 * Debug helper: hex-dump every segment of a bio.
 * Currently compiled out.
 */
static void dst_dump_bio(struct bio *bio)
{
	u8 *p;
	struct bio_vec *bv;
	int i, j;

	bio_for_each_segment(bv, bio, i) {
		dprintk("%s: %llu/%u: size: %u, offset: %u, data: ",
			__func__, bio->bi_sector, bio->bi_size,
			bv->bv_len, bv->bv_offset);

		p = kmap(bv->bv_page) + bv->bv_offset;
		/*
		 * Fix: the byte loop used to reuse 'i', clobbering the
		 * bio_for_each_segment() iterator and corrupting the
		 * segment walk; use a separate index.
		 */
		for (j = 0; j < bv->bv_len; ++j)
			printk(KERN_DEBUG "%02x ", p[j]);
		kunmap(bv->bv_page);
		printk("\n");
	}
}
#endif
507 | ||
/*
 * Encrypt/hash data and send it to the network.
 *
 * @hash receives the computed digest (at least
 * crypto_hash_digestsize() bytes). Note that dst_hash() uses the
 * in-place iterator, i.e. the digest is taken over the bio's own
 * pages, while dst_crypt() encrypted into the engine's page pool.
 */
static int dst_crypto_process_sending(struct dst_crypto_engine *e,
		struct bio *bio, u8 *hash)
{
	int err;

	if (e->cipher) {
		err = dst_crypt(e, bio);
		if (err)
			goto err_out_exit;
	}

	if (e->hash) {
		err = dst_hash(e, bio, hash);
		if (err)
			goto err_out_exit;

#ifdef CONFIG_DST_DEBUG
		{
			unsigned int i;

			/* dst_dump_bio(bio); */

			printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash: ",
				__func__, (u64)bio->bi_sector,
				bio->bi_size, bio_data_dir(bio));
			for (i = 0; i < crypto_hash_digestsize(e->hash); ++i)
				printk("%02x ", hash[i]);
			printk("\n");
		}
#endif
	}

	return 0;

err_out_exit:
	return err;
}
548 | ||
/*
 * Check if received data is valid. Decipher if it is.
 *
 * @hash:      scratch buffer where the locally computed digest is built
 * @recv_hash: digest received from the peer, compared against @hash
 */
static int dst_crypto_process_receiving(struct dst_crypto_engine *e,
		struct bio *bio, u8 *hash, u8 *recv_hash)
{
	int err;

	if (e->hash) {
		int mismatch;

		/* Recompute the digest over the received (still encrypted) data. */
		err = dst_hash(e, bio, hash);
		if (err)
			goto err_out_exit;

		mismatch = !!memcmp(recv_hash, hash,
				crypto_hash_digestsize(e->hash));
#ifdef CONFIG_DST_DEBUG
		/* dst_dump_bio(bio); */

		printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash mismatch: %d",
			__func__, (u64)bio->bi_sector, bio->bi_size,
			bio_data_dir(bio), mismatch);
		if (mismatch) {
			unsigned int i;

			printk(", recv/calc: ");
			for (i = 0; i < crypto_hash_digestsize(e->hash); ++i)
				printk("%02x/%02x ", recv_hash[i], hash[i]);

		}
		printk("\n");
#endif
		/*
		 * NOTE(review): -1 equals -EPERM in kernel errno terms;
		 * a dedicated code (e.g. -EINVAL) would be clearer —
		 * verify what callers expect before changing it.
		 */
		err = -1;
		if (mismatch)
			goto err_out_exit;
	}

	if (e->cipher) {
		/* Digest matched — decrypt in place. */
		err = dst_crypt(e, bio);
		if (err)
			goto err_out_exit;
	}

	return 0;

err_out_exit:
	return err;
}
598 | ||
/*
 * Thread pool callback to encrypt data and send it to the network.
 *
 * WRITE: encrypt/hash the bio, attach the digest to the command and
 * hand the transaction to dst_trans_send().
 * READ completion: verify/decrypt the received data and drop the
 * transaction reference.
 */
static int dst_trans_crypto_action(void *crypto_engine, void *schedule_data)
{
	struct dst_crypto_engine *e = crypto_engine;
	struct dst_trans *t = schedule_data;
	struct bio *bio = t->bio;
	int err;

	dprintk("%s: t: %p, gen: %llu, cipher: %p, hash: %p.\n",
			__func__, t, t->gen, e->cipher, e->hash);

	e->enc = t->enc;
	e->iv = dst_gen_iv(t);

	if (bio_data_dir(bio) == WRITE) {
		err = dst_crypto_process_sending(e, bio, t->cmd.hash);
		if (err)
			goto err_out_exit;

		if (e->hash) {
			/* Account for the digest appended to the command. */
			t->cmd.csize = crypto_hash_digestsize(e->hash);
			t->cmd.size += t->cmd.csize;
		}

		/* dst_trans_send() takes over the transaction's lifetime. */
		return dst_trans_send(t);
	} else {
		/* Second half of the scratch page holds the computed digest. */
		u8 *hash = e->data + e->size/2;

		err = dst_crypto_process_receiving(e, bio, hash, t->cmd.hash);
		if (err)
			goto err_out_exit;

		dst_trans_remove(t);
		dst_trans_put(t);
	}

	return 0;

err_out_exit:
	/* Record the failure on the transaction and drop our reference. */
	t->error = err;
	dst_trans_put(t);
	return err;
}
644 | ||
645 | /* | |
646 | * Schedule crypto processing for given transaction. | |
647 | */ | |
648 | int dst_trans_crypto(struct dst_trans *t) | |
649 | { | |
650 | struct dst_node *n = t->n; | |
651 | int err; | |
652 | ||
653 | err = thread_pool_schedule(n->pool, | |
654 | dst_trans_crypto_setup, dst_trans_crypto_action, | |
655 | t, MAX_SCHEDULE_TIMEOUT); | |
656 | if (err) | |
657 | goto err_out_exit; | |
658 | ||
659 | return 0; | |
660 | ||
661 | err_out_exit: | |
662 | dst_trans_put(t); | |
663 | return err; | |
664 | } | |
665 | ||
666 | /* | |
667 | * Crypto machinery for the export node. | |
668 | */ | |
669 | static int dst_export_crypto_setup(void *crypto_engine, void *bio) | |
670 | { | |
671 | struct dst_crypto_engine *e = crypto_engine; | |
672 | ||
673 | e->private = bio; | |
674 | return 0; | |
675 | } | |
676 | ||
677 | static int dst_export_crypto_action(void *crypto_engine, void *schedule_data) | |
678 | { | |
679 | struct dst_crypto_engine *e = crypto_engine; | |
680 | struct bio *bio = schedule_data; | |
681 | struct dst_export_priv *p = bio->bi_private; | |
682 | int err; | |
683 | ||
d52ac3f2 MZ |
684 | dprintk("%s: e: %p, data: %p, bio: %llu/%u, dir: %lu.\n", |
685 | __func__, e, e->data, (u64)bio->bi_sector, | |
686 | bio->bi_size, bio_data_dir(bio)); | |
1c93d9cf EP |
687 | |
688 | e->enc = (bio_data_dir(bio) == READ); | |
689 | e->iv = p->cmd.id; | |
690 | ||
691 | if (bio_data_dir(bio) == WRITE) { | |
692 | u8 *hash = e->data + e->size/2; | |
693 | ||
694 | err = dst_crypto_process_receiving(e, bio, hash, p->cmd.hash); | |
695 | if (err) | |
696 | goto err_out_exit; | |
697 | ||
698 | generic_make_request(bio); | |
699 | } else { | |
700 | err = dst_crypto_process_sending(e, bio, p->cmd.hash); | |
701 | if (err) | |
702 | goto err_out_exit; | |
703 | ||
704 | if (e->hash) { | |
705 | p->cmd.csize = crypto_hash_digestsize(e->hash); | |
706 | p->cmd.size += p->cmd.csize; | |
707 | } | |
708 | ||
709 | err = dst_export_send_bio(bio); | |
710 | } | |
711 | return 0; | |
712 | ||
713 | err_out_exit: | |
714 | bio_put(bio); | |
715 | return err; | |
716 | } | |
717 | ||
718 | int dst_export_crypto(struct dst_node *n, struct bio *bio) | |
719 | { | |
720 | int err; | |
721 | ||
722 | err = thread_pool_schedule(n->pool, | |
723 | dst_export_crypto_setup, dst_export_crypto_action, | |
724 | bio, MAX_SCHEDULE_TIMEOUT); | |
725 | if (err) | |
726 | goto err_out_exit; | |
727 | ||
728 | return 0; | |
729 | ||
730 | err_out_exit: | |
731 | bio_put(bio); | |
732 | return err; | |
733 | } |