Commit | Line | Data |
---|---|---|
03b55b9d EP |
1 | /* |
2 | * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> | |
3 | * All rights reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation; either version 2 of the License, or | |
8 | * (at your option) any later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | */ | |
15 | ||
16 | #include <linux/blkdev.h> | |
17 | #include <linux/bio.h> | |
18 | #include <linux/dst.h> | |
19 | #include <linux/in.h> | |
20 | #include <linux/in6.h> | |
21 | #include <linux/poll.h> | |
22 | #include <linux/slab.h> | |
23 | #include <linux/socket.h> | |
24 | ||
25 | #include <net/sock.h> | |
26 | ||
27 | /* | |
28 | * Export bioset is used for server block IO requests. | |
29 | */ | |
30 | static struct bio_set *dst_bio_set; | |
31 | ||
32 | int __init dst_export_init(void) | |
33 | { | |
34 | int err = -ENOMEM; | |
35 | ||
3e5510ab | 36 | dst_bio_set = bioset_create(32, sizeof(struct dst_export_priv)); |
03b55b9d EP |
37 | if (!dst_bio_set) |
38 | goto err_out_exit; | |
39 | ||
40 | return 0; | |
41 | ||
42 | err_out_exit: | |
43 | return err; | |
44 | } | |
45 | ||
/*
 * Release the export bioset created by dst_export_init().
 */
void dst_export_exit(void)
{
	bioset_free(dst_bio_set);
}
50 | ||
51 | /* | |
52 | * When client connects and autonegotiates with the server node, | |
53 | * its permissions are checked in a security attributes and sent | |
54 | * back. | |
55 | */ | |
d52ac3f2 MZ |
56 | static unsigned int dst_check_permissions(struct dst_state *main, |
57 | struct dst_state *st) | |
03b55b9d EP |
58 | { |
59 | struct dst_node *n = main->node; | |
60 | struct dst_secure *sentry; | |
61 | struct dst_secure_user *s; | |
62 | struct saddr *sa = &st->ctl.addr; | |
63 | unsigned int perm = 0; | |
64 | ||
65 | mutex_lock(&n->security_lock); | |
66 | list_for_each_entry(sentry, &n->security_list, sec_entry) { | |
67 | s = &sentry->sec; | |
68 | ||
69 | if (s->addr.sa_family != sa->sa_family) | |
70 | continue; | |
71 | ||
72 | if (s->addr.sa_data_len != sa->sa_data_len) | |
73 | continue; | |
74 | ||
75 | /* | |
76 | * This '2' below is a port field. This may be very wrong to do | |
d52ac3f2 MZ |
77 | * in atalk for example though. If there will be any need |
78 | * to extent protocol to something else, I can create | |
79 | * per-family helpers and use them instead of this memcmp. | |
03b55b9d EP |
80 | */ |
81 | if (memcmp(s->addr.sa_data + 2, sa->sa_data + 2, | |
82 | sa->sa_data_len - 2)) | |
83 | continue; | |
84 | ||
85 | perm = s->permissions; | |
86 | } | |
87 | mutex_unlock(&n->security_lock); | |
88 | ||
89 | return perm; | |
90 | } | |
91 | ||
92 | /* | |
93 | * Accept new client: allocate appropriate network state and check permissions. | |
94 | */ | |
95 | static struct dst_state *dst_accept_client(struct dst_state *st) | |
96 | { | |
97 | unsigned int revents = 0; | |
98 | unsigned int err_mask = POLLERR | POLLHUP | POLLRDHUP; | |
99 | unsigned int mask = err_mask | POLLIN; | |
100 | struct dst_node *n = st->node; | |
101 | int err = 0; | |
102 | struct socket *sock = NULL; | |
103 | struct dst_state *new; | |
104 | ||
105 | while (!err && !sock) { | |
106 | revents = dst_state_poll(st); | |
107 | ||
108 | if (!(revents & mask)) { | |
109 | DEFINE_WAIT(wait); | |
110 | ||
111 | for (;;) { | |
112 | prepare_to_wait(&st->thread_wait, | |
113 | &wait, TASK_INTERRUPTIBLE); | |
114 | if (!n->trans_scan_timeout || st->need_exit) | |
115 | break; | |
116 | ||
117 | revents = dst_state_poll(st); | |
118 | ||
119 | if (revents & mask) | |
120 | break; | |
121 | ||
122 | if (signal_pending(current)) | |
123 | break; | |
124 | ||
125 | /* | |
126 | * Magic HZ? Polling check above is not safe in | |
127 | * all cases (like socket reset in BH context), | |
128 | * so it is simpler just to postpone it to the | |
d52ac3f2 MZ |
129 | * process context instead of implementing |
130 | * special locking there. | |
03b55b9d EP |
131 | */ |
132 | schedule_timeout(HZ); | |
133 | } | |
134 | finish_wait(&st->thread_wait, &wait); | |
135 | } | |
136 | ||
137 | err = -ECONNRESET; | |
138 | dst_state_lock(st); | |
139 | ||
140 | dprintk("%s: st: %p, revents: %x [err: %d, in: %d].\n", | |
141 | __func__, st, revents, revents & err_mask, | |
142 | revents & POLLIN); | |
143 | ||
144 | if (revents & err_mask) { | |
145 | dprintk("%s: revents: %x, socket: %p, err: %d.\n", | |
146 | __func__, revents, st->socket, err); | |
147 | err = -ECONNRESET; | |
148 | } | |
149 | ||
150 | if (!n->trans_scan_timeout || st->need_exit) | |
151 | err = -ENODEV; | |
152 | ||
153 | if (st->socket && (revents & POLLIN)) | |
154 | err = kernel_accept(st->socket, &sock, 0); | |
155 | ||
156 | dst_state_unlock(st); | |
157 | } | |
158 | ||
159 | if (err) | |
160 | goto err_out_exit; | |
161 | ||
162 | new = dst_state_alloc(st->node); | |
d0e0507a | 163 | if (IS_ERR(new)) { |
03b55b9d EP |
164 | err = -ENOMEM; |
165 | goto err_out_release; | |
166 | } | |
167 | new->socket = sock; | |
168 | ||
169 | new->ctl.addr.sa_data_len = sizeof(struct sockaddr); | |
170 | err = kernel_getpeername(sock, (struct sockaddr *)&new->ctl.addr, | |
171 | (int *)&new->ctl.addr.sa_data_len); | |
172 | if (err) | |
173 | goto err_out_put; | |
174 | ||
175 | new->permissions = dst_check_permissions(st, new); | |
176 | if (new->permissions == 0) { | |
177 | err = -EPERM; | |
178 | dst_dump_addr(sock, (struct sockaddr *)&new->ctl.addr, | |
179 | "Client is not allowed to connect"); | |
180 | goto err_out_put; | |
181 | } | |
182 | ||
183 | err = dst_poll_init(new); | |
184 | if (err) | |
185 | goto err_out_put; | |
186 | ||
187 | dst_dump_addr(sock, (struct sockaddr *)&new->ctl.addr, | |
188 | "Connected client"); | |
189 | ||
190 | return new; | |
191 | ||
192 | err_out_put: | |
193 | dst_state_put(new); | |
194 | err_out_release: | |
195 | sock_release(sock); | |
196 | err_out_exit: | |
197 | return ERR_PTR(err); | |
198 | } | |
199 | ||
200 | /* | |
201 | * Each server's block request sometime finishes. | |
202 | * Usually it happens in hard irq context of the appropriate controller, | |
203 | * so to play good with all cases we just queue BIO into the queue | |
204 | * and wake up processing thread, which gets completed request and | |
205 | * send (encrypting if needed) it back to the client (if it was a read | |
bbc9a991 | 206 | * request), or sends back reply that writing successfully completed. |
03b55b9d EP |
207 | */ |
208 | static int dst_export_process_request_queue(struct dst_state *st) | |
209 | { | |
210 | unsigned long flags; | |
211 | struct dst_export_priv *p = NULL; | |
212 | struct bio *bio; | |
213 | int err = 0; | |
214 | ||
215 | while (!list_empty(&st->request_list)) { | |
216 | spin_lock_irqsave(&st->request_lock, flags); | |
217 | if (!list_empty(&st->request_list)) { | |
218 | p = list_first_entry(&st->request_list, | |
219 | struct dst_export_priv, request_entry); | |
220 | list_del(&p->request_entry); | |
221 | } | |
222 | spin_unlock_irqrestore(&st->request_lock, flags); | |
223 | ||
224 | if (!p) | |
225 | break; | |
226 | ||
227 | bio = p->bio; | |
228 | ||
229 | if (dst_need_crypto(st->node) && (bio_data_dir(bio) == READ)) | |
230 | err = dst_export_crypto(st->node, bio); | |
231 | else | |
232 | err = dst_export_send_bio(bio); | |
233 | ||
234 | if (err) | |
235 | break; | |
236 | } | |
237 | ||
238 | return err; | |
239 | } | |
240 | ||
/*
 * Cleanup export state.
 * It has to wait until all requests are finished,
 * and then free them all.
 */
static void dst_state_cleanup_export(struct dst_state *st)
{
	struct dst_export_priv *p;
	unsigned long flags;

	/*
	 * This loop waits for all pending bios to be completed and freed.
	 * Each in-flight bio holds a reference on st (taken in
	 * dst_process_io()), so refcnt == 1 means only our own reference
	 * remains.
	 */
	while (atomic_read(&st->refcnt) > 1) {
		dprintk("%s: st: %p, refcnt: %d, list_empty: %d.\n",
				__func__, st, atomic_read(&st->refcnt),
				list_empty(&st->request_list));
		/* Wake up on completion activity, or re-check every HZ/2. */
		wait_event_timeout(st->thread_wait,
				(atomic_read(&st->refcnt) == 1) ||
				!list_empty(&st->request_list),
				HZ/2);

		/* Drop every completed request that is still queued. */
		while (!list_empty(&st->request_list)) {
			p = NULL;
			spin_lock_irqsave(&st->request_lock, flags);
			if (!list_empty(&st->request_list)) {
				p = list_first_entry(&st->request_list,
					struct dst_export_priv, request_entry);
				list_del(&p->request_entry);
			}
			spin_unlock_irqrestore(&st->request_lock, flags);

			/*
			 * bio_put() triggers the destructor, which in turn
			 * drops the bio's reference on st.
			 */
			if (p)
				bio_put(p->bio);

			dprintk("%s: st: %p, refcnt: %d, list_empty: %d, p: "
				"%p.\n", __func__, st, atomic_read(&st->refcnt),
				list_empty(&st->request_list), p);
		}
	}

	/* Drop the final reference; this frees the state. */
	dst_state_put(st);
}
284 | ||
/*
 * Client accepting thread.
 * Not only accepts new connection, but also schedules receiving thread
 * and performs request completion described above.
 *
 * init_data is the owning dst_node, schedule_data the listening state.
 * Runs until the node's trans_scan_timeout is cleared or the listening
 * state is asked to exit, then tears down the listening socket.
 */
static int dst_accept(void *init_data, void *schedule_data)
{
	struct dst_state *main_st = schedule_data;
	struct dst_node *n = init_data;
	struct dst_state *st;
	int err;

	while (n->trans_scan_timeout && !main_st->need_exit) {
		dprintk("%s: main_st: %p, n: %p.\n", __func__, main_st, n);
		/* Blocks until a client connects (or shutdown begins). */
		st = dst_accept_client(main_st);
		if (IS_ERR(st))
			continue;

		err = dst_state_schedule_receiver(st);
		if (!err) {
			/*
			 * Completion loop: wait for finished bios queued by
			 * dst_bio_end_io() and push them back to the client.
			 */
			while (n->trans_scan_timeout) {
				err = wait_event_interruptible_timeout(st->thread_wait,
						!list_empty(&st->request_list) ||
						!n->trans_scan_timeout ||
						st->need_exit,
					HZ);

				if (!n->trans_scan_timeout || st->need_exit)
					break;

				if (list_empty(&st->request_list))
					continue;

				err = dst_export_process_request_queue(st);
				if (err)
					break;
			}

			/* Tell the receiver thread for this client to stop. */
			st->need_exit = 1;
			wake_up(&st->thread_wait);
		}

		/* Waits for in-flight bios, then drops the client state. */
		dst_state_cleanup_export(st);
	}

	dprintk("%s: freeing listening socket st: %p.\n", __func__, main_st);

	dst_state_lock(main_st);
	dst_poll_exit(main_st);
	dst_state_socket_release(main_st);
	dst_state_unlock(main_st);
	dst_state_put(main_st);
	dprintk("%s: freed listening socket st: %p.\n", __func__, main_st);

	return 0;
}
341 | ||
342 | int dst_start_export(struct dst_node *n) | |
343 | { | |
344 | if (list_empty(&n->security_list)) { | |
d52ac3f2 MZ |
345 | printk(KERN_ERR "You are trying to export node '%s' " |
346 | "without security attributes.\nNo clients will " | |
347 | "be allowed to connect. Exiting.\n", n->name); | |
03b55b9d EP |
348 | return -EINVAL; |
349 | } | |
350 | return dst_node_trans_init(n, sizeof(struct dst_export_priv)); | |
351 | } | |
352 | ||
/*
 * Initialize listening state and schedule accepting thread.
 *
 * Creates, binds and listens on the server socket described by @le,
 * stores the state in n->state and hands an extra reference to the
 * accepting thread (dst_accept), which drops it on exit.
 */
int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le)
{
	struct dst_state *st;
	int err = -ENOMEM;
	struct dst_network_ctl *ctl = &le->ctl;

	memcpy(&n->info->net, ctl, sizeof(struct dst_network_ctl));

	st = dst_state_alloc(n);
	if (IS_ERR(st)) {
		err = PTR_ERR(st);
		goto err_out_exit;
	}
	memcpy(&st->ctl, ctl, sizeof(struct dst_network_ctl));

	err = dst_state_socket_create(st);
	if (err)
		goto err_out_put;

	/* Allow fast rebinding after a restart. */
	st->socket->sk->sk_reuse = 1;

	err = kernel_bind(st->socket, (struct sockaddr *)&ctl->addr,
			ctl->addr.sa_data_len);
	if (err)
		goto err_out_socket_release;

	err = kernel_listen(st->socket, 1024);
	if (err)
		goto err_out_socket_release;
	n->state = st;

	err = dst_poll_init(st);
	if (err)
		goto err_out_socket_release;

	/* Reference for the accepting thread; dst_accept() puts it. */
	dst_state_get(st);

	err = thread_pool_schedule(n->pool, dst_thread_setup,
			dst_accept, st, MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err_out_poll_exit;

	return 0;

	/*
	 * NOTE(review): if thread_pool_schedule() fails, only one
	 * dst_state_put() runs below while dst_state_get() above took an
	 * extra reference — looks like a leaked reference; confirm against
	 * dst_state_alloc()/dst_state_put() semantics.
	 */
err_out_poll_exit:
	dst_poll_exit(st);
err_out_socket_release:
	dst_state_socket_release(st);
err_out_put:
	dst_state_put(st);
err_out_exit:
	n->state = NULL;
	return err;
}
410 | ||
411 | /* | |
412 | * Free bio and related private data. | |
413 | * Also drop a reference counter for appropriate state, | |
414 | * which waits when there are no more block IOs in-flight. | |
415 | */ | |
416 | static void dst_bio_destructor(struct bio *bio) | |
417 | { | |
418 | struct bio_vec *bv; | |
419 | struct dst_export_priv *priv = bio->bi_private; | |
420 | int i; | |
421 | ||
422 | bio_for_each_segment(bv, bio, i) { | |
423 | if (!bv->bv_page) | |
424 | break; | |
425 | ||
426 | __free_page(bv->bv_page); | |
427 | } | |
428 | ||
3e5510ab | 429 | if (priv) |
03b55b9d | 430 | dst_state_put(priv->state); |
03b55b9d EP |
431 | bio_free(bio, dst_bio_set); |
432 | } | |
433 | ||
434 | /* | |
435 | * Block IO completion. Queue request to be sent back to | |
436 | * the client (or just confirmation). | |
437 | */ | |
438 | static void dst_bio_end_io(struct bio *bio, int err) | |
439 | { | |
440 | struct dst_export_priv *p = bio->bi_private; | |
441 | struct dst_state *st = p->state; | |
442 | unsigned long flags; | |
443 | ||
444 | spin_lock_irqsave(&st->request_lock, flags); | |
445 | list_add_tail(&p->request_entry, &st->request_list); | |
446 | spin_unlock_irqrestore(&st->request_lock, flags); | |
447 | ||
448 | wake_up(&st->thread_wait); | |
449 | } | |
450 | ||
451 | /* | |
452 | * Allocate read request for the server. | |
453 | */ | |
454 | static int dst_export_read_request(struct bio *bio, unsigned int total_size) | |
455 | { | |
456 | unsigned int size; | |
457 | struct page *page; | |
458 | int err; | |
459 | ||
460 | while (total_size) { | |
461 | err = -ENOMEM; | |
462 | page = alloc_page(GFP_KERNEL); | |
463 | if (!page) | |
464 | goto err_out_exit; | |
465 | ||
466 | size = min_t(unsigned int, PAGE_SIZE, total_size); | |
467 | ||
468 | err = bio_add_page(bio, page, size, 0); | |
469 | dprintk("%s: bio: %llu/%u, size: %u, err: %d.\n", | |
470 | __func__, (u64)bio->bi_sector, bio->bi_size, | |
471 | size, err); | |
472 | if (err <= 0) | |
473 | goto err_out_free_page; | |
474 | ||
475 | total_size -= size; | |
476 | } | |
477 | ||
478 | return 0; | |
479 | ||
480 | err_out_free_page: | |
481 | __free_page(page); | |
482 | err_out_exit: | |
483 | return err; | |
484 | } | |
485 | ||
486 | /* | |
487 | * Allocate write request for the server. | |
488 | * Should not only get pages, but also read data from the network. | |
489 | */ | |
490 | static int dst_export_write_request(struct dst_state *st, | |
491 | struct bio *bio, unsigned int total_size) | |
492 | { | |
493 | unsigned int size; | |
494 | struct page *page; | |
495 | void *data; | |
496 | int err; | |
497 | ||
498 | while (total_size) { | |
499 | err = -ENOMEM; | |
500 | page = alloc_page(GFP_KERNEL); | |
501 | if (!page) | |
502 | goto err_out_exit; | |
503 | ||
504 | data = kmap(page); | |
505 | if (!data) | |
506 | goto err_out_free_page; | |
507 | ||
508 | size = min_t(unsigned int, PAGE_SIZE, total_size); | |
509 | ||
510 | err = dst_data_recv(st, data, size); | |
511 | if (err) | |
512 | goto err_out_unmap_page; | |
513 | ||
514 | err = bio_add_page(bio, page, size, 0); | |
515 | if (err <= 0) | |
516 | goto err_out_unmap_page; | |
517 | ||
518 | kunmap(page); | |
519 | ||
520 | total_size -= size; | |
521 | } | |
522 | ||
523 | return 0; | |
524 | ||
525 | err_out_unmap_page: | |
526 | kunmap(page); | |
527 | err_out_free_page: | |
528 | __free_page(page); | |
529 | err_out_exit: | |
530 | return err; | |
531 | } | |
532 | ||
/*
 * Groovy, we've gotten an IO request from the client.
 * Allocate BIO from the bioset, private data from the mempool
 * and lots of pages for IO.
 *
 * On success the bio is submitted via generic_make_request() and
 * completion is reported asynchronously through dst_bio_end_io().
 * Returns 0 on success or a negative error.
 */
int dst_process_io(struct dst_state *st)
{
	struct dst_node *n = st->node;
	struct dst_cmd *cmd = st->data;
	struct bio *bio;
	struct dst_export_priv *priv;
	int err = -ENOMEM;

	/* Cannot serve IO for a node without an attached block device. */
	if (unlikely(!n->bdev)) {
		err = -EINVAL;
		goto err_out_exit;
	}

	bio = bio_alloc_bioset(GFP_KERNEL,
			PAGE_ALIGN(cmd->size) >> PAGE_SHIFT,
			dst_bio_set);
	if (!bio)
		goto err_out_exit;

	/*
	 * dst_bio_set was created with a front pad of
	 * sizeof(struct dst_export_priv) (see dst_export_init()), so the
	 * private data lives immediately before the bio in the same
	 * allocation.
	 */
	priv = (struct dst_export_priv *)(((void *)bio) -
			sizeof (struct dst_export_priv));

	/* Reference dropped by dst_bio_destructor() when the bio dies. */
	priv->state = dst_state_get(st);
	priv->bio = bio;

	bio->bi_private = priv;
	bio->bi_end_io = dst_bio_end_io;
	bio->bi_destructor = dst_bio_destructor;
	bio->bi_bdev = n->bdev;

	/*
	 * Server side is only interested in two low bits:
	 * uptodate (set by itself actually) and rw block
	 */
	bio->bi_flags |= cmd->flags & 3;

	bio->bi_rw = cmd->rw;
	bio->bi_size = 0;
	bio->bi_sector = cmd->sector;

	/* Pre-build the response header to send back on completion. */
	dst_bio_to_cmd(bio, &priv->cmd, DST_IO_RESPONSE, cmd->id);

	priv->cmd.flags = 0;
	priv->cmd.size = cmd->size;

	if (bio_data_dir(bio) == WRITE) {
		err = dst_recv_cdata(st, priv->cmd.hash);
		if (err)
			goto err_out_free;

		err = dst_export_write_request(st, bio, cmd->size);
		if (err)
			goto err_out_free;

		/*
		 * Crypto path submits the bio itself after decryption,
		 * hence the early return.
		 */
		if (dst_need_crypto(n))
			return dst_export_crypto(n, bio);
	} else {
		err = dst_export_read_request(bio, cmd->size);
		if (err)
			goto err_out_free;
	}

	dprintk("%s: bio: %llu/%u, rw: %lu, dir: %lu, flags: %lx, phys: %d.\n",
			__func__, (u64)bio->bi_sector, bio->bi_size,
			bio->bi_rw, bio_data_dir(bio),
			bio->bi_flags, bio->bi_phys_segments);

	generic_make_request(bio);

	return 0;

err_out_free:
	/* Destructor frees pages and drops the state reference. */
	bio_put(bio);
err_out_exit:
	return err;
}
614 | ||
615 | /* | |
616 | * Ok, block IO is ready, let's send it back to the client... | |
617 | */ | |
618 | int dst_export_send_bio(struct bio *bio) | |
619 | { | |
620 | struct dst_export_priv *p = bio->bi_private; | |
621 | struct dst_state *st = p->state; | |
622 | struct dst_cmd *cmd = &p->cmd; | |
623 | int err; | |
624 | ||
625 | dprintk("%s: id: %llu, bio: %llu/%u, csize: %u, flags: %lu, rw: %lu.\n", | |
626 | __func__, cmd->id, (u64)bio->bi_sector, bio->bi_size, | |
627 | cmd->csize, bio->bi_flags, bio->bi_rw); | |
628 | ||
629 | dst_convert_cmd(cmd); | |
630 | ||
631 | dst_state_lock(st); | |
632 | if (!st->socket) { | |
633 | err = -ECONNRESET; | |
634 | goto err_out_unlock; | |
635 | } | |
636 | ||
637 | if (bio_data_dir(bio) == WRITE) { | |
638 | /* ... or just confirmation that writing has completed. */ | |
639 | cmd->size = cmd->csize = 0; | |
640 | err = dst_data_send_header(st->socket, cmd, | |
641 | sizeof(struct dst_cmd), 0); | |
642 | if (err) | |
643 | goto err_out_unlock; | |
644 | } else { | |
645 | err = dst_send_bio(st, cmd, bio); | |
646 | if (err) | |
647 | goto err_out_unlock; | |
648 | } | |
649 | ||
650 | dst_state_unlock(st); | |
651 | ||
652 | bio_put(bio); | |
653 | return 0; | |
654 | ||
655 | err_out_unlock: | |
656 | dst_state_unlock(st); | |
657 | ||
658 | bio_put(bio); | |
659 | return err; | |
660 | } |