1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Network block device - make block devices work over TCP
5 * Note that you can not swap over this thing, yet. Seems to work but
6 * deadlocks sometimes - you can not swap over TCP in general.
8 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
9 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
11 * (part of code stolen from loop.c)
14 #define pr_fmt(fmt) "nbd: " fmt
16 #include <linux/major.h>
18 #include <linux/blkdev.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/sched.h>
22 #include <linux/sched/mm.h>
24 #include <linux/bio.h>
25 #include <linux/stat.h>
26 #include <linux/errno.h>
27 #include <linux/file.h>
28 #include <linux/ioctl.h>
29 #include <linux/mutex.h>
30 #include <linux/compiler.h>
31 #include <linux/completion.h>
32 #include <linux/err.h>
33 #include <linux/kernel.h>
34 #include <linux/slab.h>
36 #include <linux/net.h>
37 #include <linux/kthread.h>
38 #include <linux/types.h>
39 #include <linux/debugfs.h>
40 #include <linux/blk-mq.h>
42 #include <linux/uaccess.h>
43 #include <asm/types.h>
45 #include <linux/nbd.h>
46 #include <linux/nbd-netlink.h>
47 #include <net/genetlink.h>
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/nbd.h>
52 static DEFINE_IDR(nbd_index_idr);
53 static DEFINE_MUTEX(nbd_index_mutex);
54 static struct workqueue_struct *nbd_del_wq;
55 static int nbd_total_devices = 0;
60 struct request *pending;
67 struct recv_thread_args {
68 struct work_struct work;
69 struct nbd_device *nbd;
70 struct nbd_sock *nsock;
74 struct link_dead_args {
75 struct work_struct work;
79 #define NBD_RT_TIMEDOUT 0
80 #define NBD_RT_DISCONNECT_REQUESTED 1
81 #define NBD_RT_DISCONNECTED 2
82 #define NBD_RT_HAS_PID_FILE 3
83 #define NBD_RT_HAS_CONFIG_REF 4
84 #define NBD_RT_BOUND 5
85 #define NBD_RT_DISCONNECT_ON_CLOSE 6
86 #define NBD_RT_HAS_BACKEND_FILE 7
88 #define NBD_DESTROY_ON_DISCONNECT 0
89 #define NBD_DISCONNECT_REQUESTED 1
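/*
 * The NBD_RT_* bits above are stored in nbd_config->runtime_flags and apply
 * to a single configuration; NBD_DESTROY_ON_DISCONNECT and
 * NBD_DISCONNECT_REQUESTED are stored in nbd_device->flags and outlive a
 * configuration.
 */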
93 unsigned long runtime_flags;
94 u64 dead_conn_timeout;
96 struct nbd_sock **socks;
98 atomic_t live_connections;
99 wait_queue_head_t conn_wait;
101 atomic_t recv_threads;
102 wait_queue_head_t recv_wq;
103 unsigned int blksize_bits;
105 #if IS_ENABLED(CONFIG_DEBUG_FS)
106 struct dentry *dbg_dir;
110 static inline unsigned int nbd_blksize(struct nbd_config *config)
112 return 1u << config->blksize_bits;
116 struct blk_mq_tag_set tag_set;
119 refcount_t config_refs;
121 struct nbd_config *config;
122 struct mutex config_lock;
123 struct gendisk *disk;
124 struct workqueue_struct *recv_workq;
125 struct work_struct remove_work;
127 struct list_head list;
128 struct task_struct *task_setup;
131 pid_t pid; /* pid of nbd-client, if attached */
136 #define NBD_CMD_REQUEUED 1
138 * This flag will be set if nbd_queue_rq() succeeds, and will be checked and
139 * cleared in completion. Both setting and clearing of the flag are protected
142 #define NBD_CMD_INFLIGHT 2
145 struct nbd_device *nbd;
155 #if IS_ENABLED(CONFIG_DEBUG_FS)
156 static struct dentry *nbd_dbg_dir;
159 #define nbd_name(nbd) ((nbd)->disk->disk_name)
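/* Default logical block size: 1 << NBD_DEF_BLKSIZE_BITS = 1024 bytes. */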
161 #define NBD_DEF_BLKSIZE_BITS 10
163 static unsigned int nbds_max = 16;
164 static int max_part = 16;
165 static int part_shift;
167 static int nbd_dev_dbg_init(struct nbd_device *nbd);
168 static void nbd_dev_dbg_close(struct nbd_device *nbd);
169 static void nbd_config_put(struct nbd_device *nbd);
170 static void nbd_connect_reply(struct genl_info *info, int index);
171 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
172 static void nbd_dead_link_work(struct work_struct *work);
173 static void nbd_disconnect_and_put(struct nbd_device *nbd);
175 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
177 return disk_to_dev(nbd->disk);
180 static void nbd_requeue_cmd(struct nbd_cmd *cmd)
182 struct request *req = blk_mq_rq_from_pdu(cmd);
184 if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
185 blk_mq_requeue_request(req, true);
188 #define NBD_COOKIE_BITS 32
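/*
 * The 64-bit handle put on the wire packs the per-command reissue cookie in
 * the upper 32 bits and the blk-mq unique tag in the lower 32 bits, so a
 * reply can be matched back to its request and replies carrying a stale
 * cookie (from an earlier socket) can be rejected.
 */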
190 static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
192 struct request *req = blk_mq_rq_from_pdu(cmd);
193 u32 tag = blk_mq_unique_tag(req);
194 u64 cookie = cmd->cmd_cookie;
196 return (cookie << NBD_COOKIE_BITS) | tag;
199 static u32 nbd_handle_to_tag(u64 handle)
204 static u32 nbd_handle_to_cookie(u64 handle)
206 return (u32)(handle >> NBD_COOKIE_BITS);
209 static const char *nbdcmd_to_ascii(int cmd)
212 case NBD_CMD_READ: return "read";
213 case NBD_CMD_WRITE: return "write";
214 case NBD_CMD_DISC: return "disconnect";
215 case NBD_CMD_FLUSH: return "flush";
216 case NBD_CMD_TRIM: return "trim/discard";
221 static ssize_t pid_show(struct device *dev,
222 struct device_attribute *attr, char *buf)
224 struct gendisk *disk = dev_to_disk(dev);
225 struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
227 return sprintf(buf, "%d\n", nbd->pid);
230 static const struct device_attribute pid_attr = {
231 .attr = { .name = "pid", .mode = 0444},
235 static ssize_t backend_show(struct device *dev,
236 struct device_attribute *attr, char *buf)
238 struct gendisk *disk = dev_to_disk(dev);
239 struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
241 return sprintf(buf, "%s\n", nbd->backend ?: "");
244 static const struct device_attribute backend_attr = {
245 .attr = { .name = "backend", .mode = 0444},
246 .show = backend_show,
249 static void nbd_dev_remove(struct nbd_device *nbd)
251 struct gendisk *disk = nbd->disk;
254 blk_mq_free_tag_set(&nbd->tag_set);
257 * Remove from idr after del_gendisk() completes, so if the same ID is
258 * reused, the following add_disk() will succeed.
260 mutex_lock(&nbd_index_mutex);
261 idr_remove(&nbd_index_idr, nbd->index);
262 mutex_unlock(&nbd_index_mutex);
263 destroy_workqueue(nbd->recv_workq);
267 static void nbd_dev_remove_work(struct work_struct *work)
269 nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
272 static void nbd_put(struct nbd_device *nbd)
274 if (!refcount_dec_and_test(&nbd->refs))
277 /* Call del_gendisk() asynchronously to prevent deadlock */
278 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
279 queue_work(nbd_del_wq, &nbd->remove_work);
284 static int nbd_disconnected(struct nbd_config *config)
286 return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
287 test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
290 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
293 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
294 struct link_dead_args *args;
295 args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
297 INIT_WORK(&args->work, nbd_dead_link_work);
298 args->index = nbd->index;
299 queue_work(system_wq, &args->work);
303 kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
304 if (atomic_dec_return(&nbd->config->live_connections) == 0) {
305 if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
306 &nbd->config->runtime_flags)) {
307 set_bit(NBD_RT_DISCONNECTED,
308 &nbd->config->runtime_flags);
309 dev_info(nbd_to_dev(nbd),
310 "Disconnected due to user request.\n");
315 nsock->pending = NULL;
319 static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
322 struct queue_limits lim;
326 blksize = 1u << NBD_DEF_BLKSIZE_BITS;
328 if (blk_validate_block_size(blksize))
334 nbd->config->bytesize = bytesize;
335 nbd->config->blksize_bits = __ffs(blksize);
340 lim = queue_limits_start_update(nbd->disk->queue);
341 if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
342 lim.max_hw_discard_sectors = UINT_MAX;
344 lim.max_hw_discard_sectors = 0;
345 lim.logical_block_size = blksize;
346 lim.physical_block_size = blksize;
347 error = queue_limits_commit_update(nbd->disk->queue, &lim);
352 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
353 if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
354 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
358 static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
363 blk_mq_freeze_queue(nbd->disk->queue);
364 error = __nbd_set_size(nbd, bytesize, blksize);
365 blk_mq_unfreeze_queue(nbd->disk->queue);
370 static void nbd_complete_rq(struct request *req)
372 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
374 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
375 cmd->status ? "failed" : "done");
377 blk_mq_end_request(req, cmd->status);
381 * Forcibly shut down the socket, causing all listeners to error out
383 static void sock_shutdown(struct nbd_device *nbd)
385 struct nbd_config *config = nbd->config;
388 if (config->num_connections == 0)
390 if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
393 for (i = 0; i < config->num_connections; i++) {
394 struct nbd_sock *nsock = config->socks[i];
395 mutex_lock(&nsock->tx_lock);
396 nbd_mark_nsock_dead(nbd, nsock, 0);
397 mutex_unlock(&nsock->tx_lock);
399 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
402 static u32 req_to_nbd_cmd_type(struct request *req)
404 switch (req_op(req)) {
408 return NBD_CMD_FLUSH;
410 return NBD_CMD_WRITE;
418 static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
420 if (refcount_inc_not_zero(&nbd->config_refs)) {
422 * Add smp_mb__after_atomic to ensure that reading nbd->config_refs
423 * and reading nbd->config is ordered. Its pair is the barrier in
424 * nbd_alloc_and_init_config(), which prevents nbd->config_refs from
425 * being observed as set before nbd->config.
427 smp_mb__after_atomic();
434 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
436 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
437 struct nbd_device *nbd = cmd->nbd;
438 struct nbd_config *config;
440 if (!mutex_trylock(&cmd->lock))
441 return BLK_EH_RESET_TIMER;
443 if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
444 mutex_unlock(&cmd->lock);
448 config = nbd_get_config_unlocked(nbd);
450 cmd->status = BLK_STS_TIMEOUT;
451 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
452 mutex_unlock(&cmd->lock);
456 if (config->num_connections > 1 ||
457 (config->num_connections == 1 && nbd->tag_set.timeout)) {
458 dev_err_ratelimited(nbd_to_dev(nbd),
459 "Connection timed out, retrying (%d/%d alive)\n",
460 atomic_read(&config->live_connections),
461 config->num_connections);
463 * Hooray we have more connections, requeue this IO, the submit
464 * path will put it on a real connection. Or if only one
465 * connection is configured, the submit path will wait until
466 * a new connection is configured or until the dead connection timeout expires.
469 if (cmd->index < config->num_connections) {
470 struct nbd_sock *nsock =
471 config->socks[cmd->index];
472 mutex_lock(&nsock->tx_lock);
473 /* We can have multiple outstanding requests, so
474 * we don't want to mark the nsock dead if we've
475 * already reconnected with a new socket, so
476 * only mark it dead if it's the same socket we
479 if (cmd->cookie == nsock->cookie)
480 nbd_mark_nsock_dead(nbd, nsock, 1);
481 mutex_unlock(&nsock->tx_lock);
483 mutex_unlock(&cmd->lock);
484 nbd_requeue_cmd(cmd);
490 if (!nbd->tag_set.timeout) {
492 * Userspace sets timeout=0 to disable socket disconnection,
493 * so just warn and reset the timer.
495 struct nbd_sock *nsock = config->socks[cmd->index];
497 dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
498 req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
499 (unsigned long long)blk_rq_pos(req) << 9,
500 blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
502 mutex_lock(&nsock->tx_lock);
503 if (cmd->cookie != nsock->cookie) {
504 nbd_requeue_cmd(cmd);
505 mutex_unlock(&nsock->tx_lock);
506 mutex_unlock(&cmd->lock);
510 mutex_unlock(&nsock->tx_lock);
511 mutex_unlock(&cmd->lock);
513 return BLK_EH_RESET_TIMER;
516 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
517 set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
518 cmd->status = BLK_STS_IOERR;
519 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
520 mutex_unlock(&cmd->lock);
524 blk_mq_complete_request(req);
528 static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
529 struct iov_iter *iter, int msg_flags, int *sent)
532 struct msghdr msg = {};
533 unsigned int noreclaim_flag;
535 if (unlikely(!sock)) {
536 dev_err_ratelimited(disk_to_dev(nbd->disk),
537 "Attempted %s on closed socket in sock_xmit\n",
538 (send ? "send" : "recv"));
542 msg.msg_iter = *iter;
544 noreclaim_flag = memalloc_noreclaim_save();
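/*
 * This socket I/O is issued on behalf of block requests, so it must not
 * recurse into memory reclaim (which could generate more I/O to this
 * device); hence the noreclaim section and GFP_NOIO | __GFP_MEMALLOC below.
 */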
546 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
547 sock->sk->sk_use_task_frag = false;
548 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
551 result = sock_sendmsg(sock, &msg);
553 result = sock_recvmsg(sock, &msg, msg.msg_flags);
557 result = -EPIPE; /* short read */
562 } while (msg_data_left(&msg));
564 memalloc_noreclaim_restore(noreclaim_flag);
570 * Send or receive packet. Return a positive value on success and
571 * negative value on failure, and never return 0.
573 static int sock_xmit(struct nbd_device *nbd, int index, int send,
574 struct iov_iter *iter, int msg_flags, int *sent)
576 struct nbd_config *config = nbd->config;
577 struct socket *sock = config->socks[index]->sock;
579 return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
583 * Different settings for sk->sk_sndtimeo can result in different return values
584 * if there is a signal pending when we enter sendmsg, because reasons?
586 static inline int was_interrupted(int result)
588 return result == -ERESTARTSYS || result == -EINTR;
591 /* always call with the tx_lock held */
592 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
594 struct request *req = blk_mq_rq_from_pdu(cmd);
595 struct nbd_config *config = nbd->config;
596 struct nbd_sock *nsock = config->socks[index];
598 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
599 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
600 struct iov_iter from;
601 unsigned long size = blk_rq_bytes(req);
605 u32 nbd_cmd_flags = 0;
606 int sent = nsock->sent, skip = 0;
608 iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
610 type = req_to_nbd_cmd_type(req);
614 if (rq_data_dir(req) == WRITE &&
615 (config->flags & NBD_FLAG_READ_ONLY)) {
616 dev_err_ratelimited(disk_to_dev(nbd->disk),
617 "Write on read-only\n");
621 if (req->cmd_flags & REQ_FUA)
622 nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
624 /* We did a partial send previously, and we at least sent the whole
625 * request struct, so just go and send the rest of the pages in the
629 if (sent >= sizeof(request)) {
630 skip = sent - sizeof(request);
632 /* initialize handle for tracing purposes */
633 handle = nbd_cmd_handle(cmd);
637 iov_iter_advance(&from, sent);
642 cmd->cookie = nsock->cookie;
644 request.type = htonl(type | nbd_cmd_flags);
645 if (type != NBD_CMD_FLUSH) {
646 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
647 request.len = htonl(size);
649 handle = nbd_cmd_handle(cmd);
650 request.cookie = cpu_to_be64(handle);
652 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
654 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
655 req, nbdcmd_to_ascii(type),
656 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
657 result = sock_xmit(nbd, index, 1, &from,
658 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
659 trace_nbd_header_sent(req, handle);
661 if (was_interrupted(result)) {
662 /* If we haven't sent anything we can just return BUSY,
663 * however if we have sent something we need to make
664 * sure we only allow this req to be sent until we are
668 nsock->pending = req;
671 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
672 return BLK_STS_RESOURCE;
674 dev_err_ratelimited(disk_to_dev(nbd->disk),
675 "Send control failed (result %d)\n", result);
679 if (type != NBD_CMD_WRITE)
684 struct bio *next = bio->bi_next;
685 struct bvec_iter iter;
688 bio_for_each_segment(bvec, bio, iter) {
689 bool is_last = !next && bio_iter_last(bvec, iter);
690 int flags = is_last ? 0 : MSG_MORE;
692 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
694 iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len);
696 if (skip >= iov_iter_count(&from)) {
697 skip -= iov_iter_count(&from);
700 iov_iter_advance(&from, skip);
703 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
705 if (was_interrupted(result)) {
706 /* We've already sent the header, we
707 * have no choice but to set pending and
710 nsock->pending = req;
712 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
713 return BLK_STS_RESOURCE;
715 dev_err(disk_to_dev(nbd->disk),
716 "Send data failed (result %d)\n",
721 * The completion might already have come in,
722 * so break for the last one instead of letting
723 * the iterator do it. This prevents use-after-free
732 trace_nbd_payload_sent(req, handle);
733 nsock->pending = NULL;
738 static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
739 struct nbd_reply *reply)
741 struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
746 iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
747 result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
749 if (!nbd_disconnected(nbd->config))
750 dev_err(disk_to_dev(nbd->disk),
751 "Receive control failed (result %d)\n", result);
755 if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
756 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
757 (unsigned long)ntohl(reply->magic));
764 /* NULL returned = something went wrong, inform userspace */
765 static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
766 struct nbd_reply *reply)
770 struct request *req = NULL;
776 handle = be64_to_cpu(reply->cookie);
777 tag = nbd_handle_to_tag(handle);
778 hwq = blk_mq_unique_tag_to_hwq(tag);
779 if (hwq < nbd->tag_set.nr_hw_queues)
780 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
781 blk_mq_unique_tag_to_tag(tag));
782 if (!req || !blk_mq_request_started(req)) {
783 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
785 return ERR_PTR(-ENOENT);
787 trace_nbd_header_received(req, handle);
788 cmd = blk_mq_rq_to_pdu(req);
790 mutex_lock(&cmd->lock);
791 if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
792 dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
793 tag, cmd->status, cmd->flags);
797 if (cmd->index != index) {
798 dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
799 tag, index, cmd->index);
803 if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
804 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
805 req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
809 if (cmd->status != BLK_STS_OK) {
810 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
815 if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
816 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
821 if (ntohl(reply->error)) {
822 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
823 ntohl(reply->error));
824 cmd->status = BLK_STS_IOERR;
828 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
829 if (rq_data_dir(req) != WRITE) {
830 struct req_iterator iter;
834 rq_for_each_segment(bvec, req, iter) {
835 iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len);
836 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
838 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
841 * If we've disconnected, we need to make sure we
842 * complete this request, otherwise error out
843 * and let the timeout stuff handle resubmitting
844 * this request onto another connection.
846 if (nbd_disconnected(nbd->config)) {
847 cmd->status = BLK_STS_IOERR;
853 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
858 trace_nbd_payload_received(req, handle);
859 mutex_unlock(&cmd->lock);
860 return ret ? ERR_PTR(ret) : cmd;
863 static void recv_work(struct work_struct *work)
865 struct recv_thread_args *args = container_of(work,
866 struct recv_thread_args,
868 struct nbd_device *nbd = args->nbd;
869 struct nbd_config *config = nbd->config;
870 struct request_queue *q = nbd->disk->queue;
871 struct nbd_sock *nsock = args->nsock;
876 struct nbd_reply reply;
878 if (nbd_read_reply(nbd, nsock->sock, &reply))
882 * Grab .q_usage_counter so request pool won't go away, then no
883 * request use-after-free is possible during nbd_handle_reply().
884 * If the queue is frozen, there won't be any inflight requests, so we
885 * need not handle the incoming garbage message.
887 if (!percpu_ref_tryget(&q->q_usage_counter)) {
888 dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
893 cmd = nbd_handle_reply(nbd, args->index, &reply);
895 percpu_ref_put(&q->q_usage_counter);
899 rq = blk_mq_rq_from_pdu(cmd);
900 if (likely(!blk_should_fake_timeout(rq->q))) {
903 mutex_lock(&cmd->lock);
904 complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
906 mutex_unlock(&cmd->lock);
908 blk_mq_complete_request(rq);
910 percpu_ref_put(&q->q_usage_counter);
913 mutex_lock(&nsock->tx_lock);
914 nbd_mark_nsock_dead(nbd, nsock, 1);
915 mutex_unlock(&nsock->tx_lock);
918 atomic_dec(&config->recv_threads);
919 wake_up(&config->recv_wq);
923 static bool nbd_clear_req(struct request *req, void *data)
925 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
927 /* don't abort a completed request */
928 if (blk_mq_request_completed(req))
931 mutex_lock(&cmd->lock);
932 if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
933 mutex_unlock(&cmd->lock);
936 cmd->status = BLK_STS_IOERR;
937 mutex_unlock(&cmd->lock);
939 blk_mq_complete_request(req);
943 static void nbd_clear_que(struct nbd_device *nbd)
945 blk_mq_quiesce_queue(nbd->disk->queue);
946 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
947 blk_mq_unquiesce_queue(nbd->disk->queue);
948 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
951 static int find_fallback(struct nbd_device *nbd, int index)
953 struct nbd_config *config = nbd->config;
955 struct nbd_sock *nsock = config->socks[index];
956 int fallback = nsock->fallback_index;
958 if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
961 if (config->num_connections <= 1) {
962 dev_err_ratelimited(disk_to_dev(nbd->disk),
963 "Dead connection, failed to find a fallback\n");
967 if (fallback >= 0 && fallback < config->num_connections &&
968 !config->socks[fallback]->dead)
971 if (nsock->fallback_index < 0 ||
972 nsock->fallback_index >= config->num_connections ||
973 config->socks[nsock->fallback_index]->dead) {
975 for (i = 0; i < config->num_connections; i++) {
978 if (!config->socks[i]->dead) {
983 nsock->fallback_index = new_index;
985 dev_err_ratelimited(disk_to_dev(nbd->disk),
986 "Dead connection, failed to find a fallback\n");
990 new_index = nsock->fallback_index;
994 static int wait_for_reconnect(struct nbd_device *nbd)
996 struct nbd_config *config = nbd->config;
997 if (!config->dead_conn_timeout)
1000 if (!wait_event_timeout(config->conn_wait,
1001 test_bit(NBD_RT_DISCONNECTED,
1002 &config->runtime_flags) ||
1003 atomic_read(&config->live_connections) > 0,
1004 config->dead_conn_timeout))
1007 return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
1010 static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1012 struct request *req = blk_mq_rq_from_pdu(cmd);
1013 struct nbd_device *nbd = cmd->nbd;
1014 struct nbd_config *config;
1015 struct nbd_sock *nsock;
1018 config = nbd_get_config_unlocked(nbd);
1020 dev_err_ratelimited(disk_to_dev(nbd->disk),
1021 "Socks array is empty\n");
1025 if (index >= config->num_connections) {
1026 dev_err_ratelimited(disk_to_dev(nbd->disk),
1027 "Attempted send on invalid socket\n");
1028 nbd_config_put(nbd);
1031 cmd->status = BLK_STS_OK;
1033 nsock = config->socks[index];
1034 mutex_lock(&nsock->tx_lock);
1036 int old_index = index;
1037 index = find_fallback(nbd, index);
1038 mutex_unlock(&nsock->tx_lock);
1040 if (wait_for_reconnect(nbd)) {
1044 /* All the sockets should already be down at this point,
1045 * we just want to make sure that DISCONNECTED is set so
1046 * any requests that come in that were queued waiting
1047 * for the reconnect timer don't trigger the timer again
1048 * and instead just error out.
1051 nbd_config_put(nbd);
1057 /* Handle the case that we have a pending request that was partially
1058 * transmitted that _has_ to be serviced first. We need to call requeue
1059 * here so that it gets put _after_ the request that is already on the
1062 blk_mq_start_request(req);
1063 if (unlikely(nsock->pending && nsock->pending != req)) {
1064 nbd_requeue_cmd(cmd);
1069 * Some failures are related to the link going down, so anything that
1070 * returns EAGAIN can be retried on a different socket.
1072 ret = nbd_send_cmd(nbd, cmd, index);
1074 * Access to this flag is protected by cmd->lock, thus it's safe to set
1075 * the flag after nbd_send_cmd() succeeds in sending the request to the server.
1078 __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
1079 else if (ret == -EAGAIN) {
1080 dev_err_ratelimited(disk_to_dev(nbd->disk),
1081 "Request send failed, requeueing\n");
1082 nbd_mark_nsock_dead(nbd, nsock, 1);
1083 nbd_requeue_cmd(cmd);
1087 mutex_unlock(&nsock->tx_lock);
1088 nbd_config_put(nbd);
1092 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
1093 const struct blk_mq_queue_data *bd)
1095 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
1099 * Since we look at the bios to send the request over the network we
1100 * need to make sure the completion work doesn't mark this request done
1101 * before we are done doing our send. This keeps us from dereferencing
1102 * freed data if we have particularly fast completions (i.e. we get the
1103 * completion before we exit sock_xmit on the last bvec) or in the case
1104 * that the server is misbehaving (or there was an error) before we're
1105 * done sending everything over the wire.
1107 mutex_lock(&cmd->lock);
1108 clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
1110 /* We can be called directly from the user space process, which means we
1111 * could possibly have signals pending so our sendmsg will fail. In
1112 * this case we need to return that we are busy, otherwise error out as
1115 ret = nbd_handle_cmd(cmd, hctx->queue_num);
1117 ret = BLK_STS_IOERR;
1120 mutex_unlock(&cmd->lock);
1125 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
1128 struct socket *sock;
1131 sock = sockfd_lookup(fd, err);
1135 if (sock->ops->shutdown == sock_no_shutdown) {
1136 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
1145 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
1148 struct nbd_config *config = nbd->config;
1149 struct socket *sock;
1150 struct nbd_sock **socks;
1151 struct nbd_sock *nsock;
1154 /* Arg will be cast to int, check it to avoid overflow */
1157 sock = nbd_get_socket(nbd, arg, &err);
1162 * We need to make sure we don't get any errant requests while we're
1163 * reallocating the ->socks array.
1165 blk_mq_freeze_queue(nbd->disk->queue);
1167 if (!netlink && !nbd->task_setup &&
1168 !test_bit(NBD_RT_BOUND, &config->runtime_flags))
1169 nbd->task_setup = current;
1172 (nbd->task_setup != current ||
1173 test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
1174 dev_err(disk_to_dev(nbd->disk),
1175 "Device being setup by another task");
1180 nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
1186 socks = krealloc(config->socks, (config->num_connections + 1) *
1187 sizeof(struct nbd_sock *), GFP_KERNEL);
1194 config->socks = socks;
1196 nsock->fallback_index = -1;
1197 nsock->dead = false;
1198 mutex_init(&nsock->tx_lock);
1200 nsock->pending = NULL;
1203 socks[config->num_connections++] = nsock;
1204 atomic_inc(&config->live_connections);
1205 blk_mq_unfreeze_queue(nbd->disk->queue);
1210 blk_mq_unfreeze_queue(nbd->disk->queue);
1215 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
1217 struct nbd_config *config = nbd->config;
1218 struct socket *sock, *old;
1219 struct recv_thread_args *args;
1223 sock = nbd_get_socket(nbd, arg, &err);
1227 args = kzalloc(sizeof(*args), GFP_KERNEL);
1233 for (i = 0; i < config->num_connections; i++) {
1234 struct nbd_sock *nsock = config->socks[i];
1239 mutex_lock(&nsock->tx_lock);
1241 mutex_unlock(&nsock->tx_lock);
1244 sk_set_memalloc(sock->sk);
1245 if (nbd->tag_set.timeout)
1246 sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
1247 atomic_inc(&config->recv_threads);
1248 refcount_inc(&nbd->config_refs);
1250 nsock->fallback_index = -1;
1252 nsock->dead = false;
1253 INIT_WORK(&args->work, recv_work);
1256 args->nsock = nsock;
1258 mutex_unlock(&nsock->tx_lock);
1261 clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
1263 /* We take the tx_lock in an error path in the recv_work, so we
1264 * need to queue_work outside of the tx_lock.
1266 queue_work(nbd->recv_workq, &args->work);
1268 atomic_inc(&config->live_connections);
1269 wake_up(&config->conn_wait);
1277 static void nbd_bdev_reset(struct nbd_device *nbd)
1279 if (disk_openers(nbd->disk) > 1)
1281 set_capacity(nbd->disk, 0);
1284 static void nbd_parse_flags(struct nbd_device *nbd)
1286 struct nbd_config *config = nbd->config;
1287 if (config->flags & NBD_FLAG_READ_ONLY)
1288 set_disk_ro(nbd->disk, true);
1290 set_disk_ro(nbd->disk, false);
1291 if (config->flags & NBD_FLAG_SEND_FLUSH) {
1292 if (config->flags & NBD_FLAG_SEND_FUA)
1293 blk_queue_write_cache(nbd->disk->queue, true, true);
1295 blk_queue_write_cache(nbd->disk->queue, true, false);
1298 blk_queue_write_cache(nbd->disk->queue, false, false);
1301 static void send_disconnects(struct nbd_device *nbd)
1303 struct nbd_config *config = nbd->config;
1304 struct nbd_request request = {
1305 .magic = htonl(NBD_REQUEST_MAGIC),
1306 .type = htonl(NBD_CMD_DISC),
1308 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
1309 struct iov_iter from;
1312 for (i = 0; i < config->num_connections; i++) {
1313 struct nbd_sock *nsock = config->socks[i];
1315 iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
1316 mutex_lock(&nsock->tx_lock);
1317 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
1319 dev_err(disk_to_dev(nbd->disk),
1320 "Send disconnect failed %d\n", ret);
1321 mutex_unlock(&nsock->tx_lock);
1325 static int nbd_disconnect(struct nbd_device *nbd)
1327 struct nbd_config *config = nbd->config;
1329 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
1330 set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
1331 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
1332 send_disconnects(nbd);
1336 static void nbd_clear_sock(struct nbd_device *nbd)
1340 nbd->task_setup = NULL;
1343 static void nbd_config_put(struct nbd_device *nbd)
1345 if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1346 &nbd->config_lock)) {
1347 struct nbd_config *config = nbd->config;
1348 nbd_dev_dbg_close(nbd);
1349 invalidate_disk(nbd->disk);
1350 if (nbd->config->bytesize)
1351 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
1352 if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
1353 &config->runtime_flags))
1354 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1356 if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
1357 &config->runtime_flags)) {
1358 device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
1359 kfree(nbd->backend);
1360 nbd->backend = NULL;
1362 nbd_clear_sock(nbd);
1363 if (config->num_connections) {
1365 for (i = 0; i < config->num_connections; i++) {
1366 sockfd_put(config->socks[i]->sock);
1367 kfree(config->socks[i]);
1369 kfree(config->socks);
1374 nbd->tag_set.timeout = 0;
1376 mutex_unlock(&nbd->config_lock);
1378 module_put(THIS_MODULE);
1382 static int nbd_start_device(struct nbd_device *nbd)
1384 struct nbd_config *config = nbd->config;
1385 int num_connections = config->num_connections;
1392 if (num_connections > 1 &&
1393 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
1394 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
1398 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
1399 nbd->pid = task_pid_nr(current);
1401 nbd_parse_flags(nbd);
1403 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1405 dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
1408 set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
1410 nbd_dev_dbg_init(nbd);
1411 for (i = 0; i < num_connections; i++) {
1412 struct recv_thread_args *args;
1414 args = kzalloc(sizeof(*args), GFP_KERNEL);
1418 * If num_connections is m (m > 2), and the first n kzallocs
1419 * (1 < n < m) succeed but the (n + 1)th fails, we still have
1420 * n recv threads running.
1421 * So, add flush_workqueue here to prevent recv threads
1422 * dropping the last config_refs and trying to destroy
1423 * the workqueue from inside the workqueue.
1426 flush_workqueue(nbd->recv_workq);
1429 sk_set_memalloc(config->socks[i]->sock->sk);
1430 if (nbd->tag_set.timeout)
1431 config->socks[i]->sock->sk->sk_sndtimeo =
1432 nbd->tag_set.timeout;
1433 atomic_inc(&config->recv_threads);
1434 refcount_inc(&nbd->config_refs);
1435 INIT_WORK(&args->work, recv_work);
1437 args->nsock = config->socks[i];
1439 queue_work(nbd->recv_workq, &args->work);
1441 return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
1444 static int nbd_start_device_ioctl(struct nbd_device *nbd)
1446 struct nbd_config *config = nbd->config;
1449 ret = nbd_start_device(nbd);
1454 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
1455 mutex_unlock(&nbd->config_lock);
1456 ret = wait_event_interruptible(config->recv_wq,
1457 atomic_read(&config->recv_threads) == 0);
1463 flush_workqueue(nbd->recv_workq);
1464 mutex_lock(&nbd->config_lock);
1465 nbd_bdev_reset(nbd);
1466 /* user requested, ignore socket errors */
1467 if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
1469 if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
1474 static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
1476 nbd_clear_sock(nbd);
1477 disk_force_media_change(nbd->disk);
1478 nbd_bdev_reset(nbd);
1479 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1480 &nbd->config->runtime_flags))
1481 nbd_config_put(nbd);
1484 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1486 nbd->tag_set.timeout = timeout * HZ;
1488 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1490 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
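/*
 * Note (inferred from nbd_xmit_timeout() above): with timeout == 0 the 30s
 * block-layer timer still fires, but the timeout handler only warns and
 * rearms it (or requeues to a reconnected socket), so requests are not
 * failed on the socket's behalf.
 */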
1493 /* Must be called with config_lock held */
1494 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1495 unsigned int cmd, unsigned long arg)
1497 struct nbd_config *config = nbd->config;
1501 case NBD_DISCONNECT:
1502 return nbd_disconnect(nbd);
1503 case NBD_CLEAR_SOCK:
1504 nbd_clear_sock_ioctl(nbd);
1507 return nbd_add_socket(nbd, arg, false);
1508 case NBD_SET_BLKSIZE:
1509 return nbd_set_size(nbd, config->bytesize, arg);
1511 return nbd_set_size(nbd, arg, nbd_blksize(config));
1512 case NBD_SET_SIZE_BLOCKS:
1513 if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
1515 return nbd_set_size(nbd, bytesize, nbd_blksize(config));
1516 case NBD_SET_TIMEOUT:
1517 nbd_set_cmd_timeout(nbd, arg);
1521 config->flags = arg;
1524 return nbd_start_device_ioctl(nbd);
1527 * This is for compatibility only. The queue is always cleared
1528 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1531 case NBD_PRINT_DEBUG:
1533 * For compatibility only, we no longer keep a list of
1534 * outstanding requests.
1541 static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
1542 unsigned int cmd, unsigned long arg)
1544 struct nbd_device *nbd = bdev->bd_disk->private_data;
1545 struct nbd_config *config = nbd->config;
1546 int error = -EINVAL;
1548 if (!capable(CAP_SYS_ADMIN))
1551 /* The block layer will pass back some non-nbd ioctls in case we have
1552 * special handling for them, but we don't, so just return an error.
1554 if (_IOC_TYPE(cmd) != 0xab)
1557 mutex_lock(&nbd->config_lock);
1559 /* Don't allow ioctl operations on an nbd device that was created with
1560 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1562 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
1563 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1564 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1566 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1567 mutex_unlock(&nbd->config_lock);
1571 static int nbd_alloc_and_init_config(struct nbd_device *nbd)
1573 struct nbd_config *config;
1575 if (WARN_ON(nbd->config))
1578 if (!try_module_get(THIS_MODULE))
1581 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1583 module_put(THIS_MODULE);
1587 atomic_set(&config->recv_threads, 0);
1588 init_waitqueue_head(&config->recv_wq);
1589 init_waitqueue_head(&config->conn_wait);
1590 config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
1591 atomic_set(&config->live_connections, 0);
1593 nbd->config = config;
1595 * Order refcount_set(&nbd->config_refs, 1) after the nbd->config assignment;
1596 * its pair is the barrier in nbd_get_config_unlocked(),
1597 * so nbd_get_config_unlocked() won't see nbd->config as NULL after
1598 * refcount_inc_not_zero() succeeds.
1600 smp_mb__before_atomic();
1601 refcount_set(&nbd->config_refs, 1);
1606 static int nbd_open(struct gendisk *disk, blk_mode_t mode)
1608 struct nbd_device *nbd;
1609 struct nbd_config *config;
1612 mutex_lock(&nbd_index_mutex);
1613 nbd = disk->private_data;
1618 if (!refcount_inc_not_zero(&nbd->refs)) {
1623 config = nbd_get_config_unlocked(nbd);
1625 mutex_lock(&nbd->config_lock);
1626 if (refcount_inc_not_zero(&nbd->config_refs)) {
1627 mutex_unlock(&nbd->config_lock);
1630 ret = nbd_alloc_and_init_config(nbd);
1632 mutex_unlock(&nbd->config_lock);
1636 refcount_inc(&nbd->refs);
1637 mutex_unlock(&nbd->config_lock);
1639 set_bit(GD_NEED_PART_SCAN, &disk->state);
1640 } else if (nbd_disconnected(config)) {
1642 set_bit(GD_NEED_PART_SCAN, &disk->state);
1645 mutex_unlock(&nbd_index_mutex);
1649 static void nbd_release(struct gendisk *disk)
1651 struct nbd_device *nbd = disk->private_data;
1653 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1654 disk_openers(disk) == 0)
1655 nbd_disconnect_and_put(nbd);
1657 nbd_config_put(nbd);
1661 static void nbd_free_disk(struct gendisk *disk)
1663 struct nbd_device *nbd = disk->private_data;
1668 static const struct block_device_operations nbd_fops =
1670 .owner = THIS_MODULE,
1672 .release = nbd_release,
1674 .compat_ioctl = nbd_ioctl,
1675 .free_disk = nbd_free_disk,
1678 #if IS_ENABLED(CONFIG_DEBUG_FS)
1680 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1682 struct nbd_device *nbd = s->private;
1685 seq_printf(s, "recv: %d\n", nbd->pid);
1690 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
1692 static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1694 struct nbd_device *nbd = s->private;
1695 u32 flags = nbd->config->flags;
1697 seq_printf(s, "Hex: 0x%08x\n\n", flags);
1699 seq_puts(s, "Known flags:\n");
1701 if (flags & NBD_FLAG_HAS_FLAGS)
1702 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1703 if (flags & NBD_FLAG_READ_ONLY)
1704 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1705 if (flags & NBD_FLAG_SEND_FLUSH)
1706 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1707 if (flags & NBD_FLAG_SEND_FUA)
1708 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
1709 if (flags & NBD_FLAG_SEND_TRIM)
1710 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1715 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
1717 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1720 struct nbd_config *config = nbd->config;
1725 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1727 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1731 config->dbg_dir = dir;
1733 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
1734 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1735 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1736 debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
1737 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
1742 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1744 debugfs_remove_recursive(nbd->config->dbg_dir);
1747 static int nbd_dbg_init(void)
1749 struct dentry *dbg_dir;
1751 dbg_dir = debugfs_create_dir("nbd", NULL);
1752 if (IS_ERR(dbg_dir))
1755 nbd_dbg_dir = dbg_dir;
1760 static void nbd_dbg_close(void)
1762 debugfs_remove_recursive(nbd_dbg_dir);
1765 #else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1767 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1772 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1776 static int nbd_dbg_init(void)
1781 static void nbd_dbg_close(void)
1787 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1788 unsigned int hctx_idx, unsigned int numa_node)
1790 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1791 cmd->nbd = set->driver_data;
1793 mutex_init(&cmd->lock);
1797 static const struct blk_mq_ops nbd_mq_ops = {
1798 .queue_rq = nbd_queue_rq,
1799 .complete = nbd_complete_rq,
1800 .init_request = nbd_init_request,
1801 .timeout = nbd_xmit_timeout,
1804 static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
1806 struct queue_limits lim = {
1807 .max_hw_sectors = 65536,
1808 .max_user_sectors = 256,
1809 .max_segments = USHRT_MAX,
1810 .max_segment_size = UINT_MAX,
1812 struct nbd_device *nbd;
1813 struct gendisk *disk;
1816 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1820 nbd->tag_set.ops = &nbd_mq_ops;
1821 nbd->tag_set.nr_hw_queues = 1;
1822 nbd->tag_set.queue_depth = 128;
1823 nbd->tag_set.numa_node = NUMA_NO_NODE;
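/*
 * cmd_size makes blk-mq allocate a struct nbd_cmd next to every request;
 * blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu() convert between the two.
 */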
1824 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1825 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1827 nbd->tag_set.driver_data = nbd;
1828 INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
1829 nbd->backend = NULL;
1831 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1835 mutex_lock(&nbd_index_mutex);
1837 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1842 err = idr_alloc(&nbd_index_idr, nbd, 0,
1843 (MINORMASK >> part_shift) + 1, GFP_KERNEL);
1848 mutex_unlock(&nbd_index_mutex);
1852 disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL);
1854 err = PTR_ERR(disk);
1859 nbd->recv_workq = alloc_workqueue("nbd%d-recv",
1860 WQ_MEM_RECLAIM | WQ_HIGHPRI |
1861 WQ_UNBOUND, 0, nbd->index);
1862 if (!nbd->recv_workq) {
1863 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1869 * Tell the block layer that we are not a rotational device
1871 blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1873 mutex_init(&nbd->config_lock);
1874 refcount_set(&nbd->config_refs, 0);
1876 * Start out with zero references to keep other threads from using
1877 * this device until it is fully initialized.
1879 refcount_set(&nbd->refs, 0);
1880 INIT_LIST_HEAD(&nbd->list);
1881 disk->major = NBD_MAJOR;
1882 disk->first_minor = index << part_shift;
1883 disk->minors = 1 << part_shift;
1884 disk->fops = &nbd_fops;
1885 disk->private_data = nbd;
1886 sprintf(disk->disk_name, "nbd%d", index);
1887 err = add_disk(disk);
1892 * Now publish the device.
1894 refcount_set(&nbd->refs, refs);
1895 nbd_total_devices++;
1899 destroy_workqueue(nbd->recv_workq);
1903 mutex_lock(&nbd_index_mutex);
1904 idr_remove(&nbd_index_idr, index);
1905 mutex_unlock(&nbd_index_mutex);
1907 blk_mq_free_tag_set(&nbd->tag_set);
1911 return ERR_PTR(err);
1914 static struct nbd_device *nbd_find_get_unused(void)
1916 struct nbd_device *nbd;
1919 lockdep_assert_held(&nbd_index_mutex);
1921 idr_for_each_entry(&nbd_index_idr, nbd, id) {
1922 if (refcount_read(&nbd->config_refs) ||
1923 test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
1925 if (refcount_inc_not_zero(&nbd->refs))
1932 /* Netlink interface. */
1933 static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
1934 [NBD_ATTR_INDEX] = { .type = NLA_U32 },
1935 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
1936 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
1937 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
1938 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
1939 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
1940 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED},
1941 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
1942 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED},
1943 [NBD_ATTR_BACKEND_IDENTIFIER] = { .type = NLA_STRING},
1946 static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
1947 [NBD_SOCK_FD] = { .type = NLA_U32 },
1950 /* We don't use this right now since we don't parse the incoming list, but we
1951 * still want it here so userspace knows what to expect.
1953 static const struct nla_policy __attribute__((unused))
1954 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1955 [NBD_DEVICE_INDEX] = { .type = NLA_U32 },
1956 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
1959 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1961 struct nbd_config *config = nbd->config;
1962 u64 bsize = nbd_blksize(config);
1963 u64 bytes = config->bytesize;
1965 if (info->attrs[NBD_ATTR_SIZE_BYTES])
1966 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1968 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
1969 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1971 if (bytes != config->bytesize || bsize != nbd_blksize(config))
1972 return nbd_set_size(nbd, bytes, bsize);
1976 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1978 struct nbd_device *nbd;
1979 struct nbd_config *config;
1982 bool put_dev = false;
1984 if (!netlink_capable(skb, CAP_SYS_ADMIN))
1987 if (info->attrs[NBD_ATTR_INDEX]) {
1988 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1991 * A too-large first_minor can cause duplicate creation of
1992 * sysfs files/links, since index << part_shift might overflow or
1993 * exceed the 20 bits that MKDEV() expects for first_minor.
1995 if (index < 0 || index > MINORMASK >> part_shift) {
1996 pr_err("illegal input index %d\n", index);
2000 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SOCKETS)) {
2001 pr_err("must specify at least one socket\n");
2004 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SIZE_BYTES)) {
2005 pr_err("must specify a size in bytes for the device\n");
2009 mutex_lock(&nbd_index_mutex);
2011 nbd = nbd_find_get_unused();
2013 nbd = idr_find(&nbd_index_idr, index);
2015 if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
2016 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
2017 !refcount_inc_not_zero(&nbd->refs)) {
2018 mutex_unlock(&nbd_index_mutex);
2019 pr_err("device at index %d is going down\n",
2025 mutex_unlock(&nbd_index_mutex);
2028 nbd = nbd_dev_add(index, 2);
2030 pr_err("failed to add new device\n");
2031 return PTR_ERR(nbd);
2035 mutex_lock(&nbd->config_lock);
2036 if (refcount_read(&nbd->config_refs)) {
2037 mutex_unlock(&nbd->config_lock);
2041 pr_err("nbd%d already in use\n", index);
2045 ret = nbd_alloc_and_init_config(nbd);
2047 mutex_unlock(&nbd->config_lock);
2049 pr_err("couldn't allocate config\n");
2053 config = nbd->config;
2054 set_bit(NBD_RT_BOUND, &config->runtime_flags);
2055 ret = nbd_genl_size_set(info, nbd);
2059 if (info->attrs[NBD_ATTR_TIMEOUT])
2060 nbd_set_cmd_timeout(nbd,
2061 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2062 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2063 config->dead_conn_timeout =
2064 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2065 config->dead_conn_timeout *= HZ;
2067 if (info->attrs[NBD_ATTR_SERVER_FLAGS])
2069 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
2070 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2071 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2072 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2074 * We have 1 ref to keep the device around, and then 1
2075 * ref for our current operation here, which will be
2076 * inherited by the config. If we already have
2077 * DESTROY_ON_DISCONNECT set then we know we don't have
2078 * that extra ref already held so we don't need the
2081 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
2085 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
2087 refcount_inc(&nbd->refs);
2089 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2090 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2091 &config->runtime_flags);
2095 if (info->attrs[NBD_ATTR_SOCKETS]) {
2096 struct nlattr *attr;
2099 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2101 struct nlattr *socks[NBD_SOCK_MAX+1];
2103 if (nla_type(attr) != NBD_SOCK_ITEM) {
2104 pr_err("socks must be embedded in a SOCK_ITEM attr\n");
2108 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2113 pr_err("error processing sock list\n");
2117 if (!socks[NBD_SOCK_FD])
2119 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2120 ret = nbd_add_socket(nbd, fd, true);
2125 ret = nbd_start_device(nbd);
2128 if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
2129 nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
2131 if (!nbd->backend) {
2136 ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
2138 dev_err(disk_to_dev(nbd->disk),
2139 "device_create_file failed for backend!\n");
2142 set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
2144 mutex_unlock(&nbd->config_lock);
2146 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
2147 refcount_inc(&nbd->config_refs);
2148 nbd_connect_reply(info, nbd->index);
2150 nbd_config_put(nbd);
2156 static void nbd_disconnect_and_put(struct nbd_device *nbd)
2158 mutex_lock(&nbd->config_lock);
2159 nbd_disconnect(nbd);
2161 wake_up(&nbd->config->conn_wait);
2163 * Make sure the recv thread has finished, so we can safely call nbd_clear_que()
2164 * to cancel the inflight I/Os.
2166 flush_workqueue(nbd->recv_workq);
2168 nbd->task_setup = NULL;
2169 mutex_unlock(&nbd->config_lock);
2171 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
2172 &nbd->config->runtime_flags))
2173 nbd_config_put(nbd);
2176 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
2178 struct nbd_device *nbd;
2181 if (!netlink_capable(skb, CAP_SYS_ADMIN))
2184 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
2185 pr_err("must specify an index to disconnect\n");
2188 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2189 mutex_lock(&nbd_index_mutex);
2190 nbd = idr_find(&nbd_index_idr, index);
2192 mutex_unlock(&nbd_index_mutex);
2193 pr_err("couldn't find device at index %d\n", index);
2196 if (!refcount_inc_not_zero(&nbd->refs)) {
2197 mutex_unlock(&nbd_index_mutex);
2198 pr_err("device at index %d is going down\n", index);
2201 mutex_unlock(&nbd_index_mutex);
2202 if (!refcount_inc_not_zero(&nbd->config_refs))
2204 nbd_disconnect_and_put(nbd);
2205 nbd_config_put(nbd);
2211 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
2213 struct nbd_device *nbd = NULL;
2214 struct nbd_config *config;
2217 bool put_dev = false;
2219 if (!netlink_capable(skb, CAP_SYS_ADMIN))
2222 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
2223 pr_err("must specify a device to reconfigure\n");
2226 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2227 mutex_lock(&nbd_index_mutex);
2228 nbd = idr_find(&nbd_index_idr, index);
2230 mutex_unlock(&nbd_index_mutex);
2231 pr_err("couldn't find a device at index %d\n", index);
2235 if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
2236 if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
2238 mutex_unlock(&nbd_index_mutex);
2239 dev_err(nbd_to_dev(nbd),
2240 "backend image doesn't match with %s\n",
2245 mutex_unlock(&nbd_index_mutex);
2246 dev_err(nbd_to_dev(nbd), "must specify backend\n");
2250 if (!refcount_inc_not_zero(&nbd->refs)) {
2251 mutex_unlock(&nbd_index_mutex);
2252 pr_err("device at index %d is going down\n", index);
2255 mutex_unlock(&nbd_index_mutex);
2257 config = nbd_get_config_unlocked(nbd);
2259 dev_err(nbd_to_dev(nbd),
2260 "not configured, cannot reconfigure\n");
2265 mutex_lock(&nbd->config_lock);
2266 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
2268 dev_err(nbd_to_dev(nbd),
2269 "not configured, cannot reconfigure\n");
2274 ret = nbd_genl_size_set(info, nbd);
2278 if (info->attrs[NBD_ATTR_TIMEOUT])
2279 nbd_set_cmd_timeout(nbd,
2280 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2281 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2282 config->dead_conn_timeout =
2283 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2284 config->dead_conn_timeout *= HZ;
2286 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2287 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2288 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2289 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
2293 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
2295 refcount_inc(&nbd->refs);
2298 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2299 set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2300 &config->runtime_flags);
2302 clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2303 &config->runtime_flags);
2307 if (info->attrs[NBD_ATTR_SOCKETS]) {
2308 struct nlattr *attr;
2311 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2313 struct nlattr *socks[NBD_SOCK_MAX+1];
2315 if (nla_type(attr) != NBD_SOCK_ITEM) {
2316 pr_err("socks must be embedded in a SOCK_ITEM attr\n");
2320 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2325 pr_err("error processing sock list\n");
2329 if (!socks[NBD_SOCK_FD])
2331 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2332 ret = nbd_reconnect_socket(nbd, fd);
2338 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2342 mutex_unlock(&nbd->config_lock);
2343 nbd_config_put(nbd);
2350 static const struct genl_small_ops nbd_connect_genl_ops[] = {
2352 .cmd = NBD_CMD_CONNECT,
2353 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2354 .doit = nbd_genl_connect,
2357 .cmd = NBD_CMD_DISCONNECT,
2358 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2359 .doit = nbd_genl_disconnect,
2362 .cmd = NBD_CMD_RECONFIGURE,
2363 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2364 .doit = nbd_genl_reconfigure,
2367 .cmd = NBD_CMD_STATUS,
2368 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2369 .doit = nbd_genl_status,
2373 static const struct genl_multicast_group nbd_mcast_grps[] = {
2374 { .name = NBD_GENL_MCAST_GROUP_NAME, },
2377 static struct genl_family nbd_genl_family __ro_after_init = {
2379 .name = NBD_GENL_FAMILY_NAME,
2380 .version = NBD_GENL_VERSION,
2381 .module = THIS_MODULE,
2382 .small_ops = nbd_connect_genl_ops,
2383 .n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
2384 .resv_start_op = NBD_CMD_STATUS + 1,
2385 .maxattr = NBD_ATTR_MAX,
2387 .policy = nbd_attr_policy,
2388 .mcgrps = nbd_mcast_grps,
2389 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
2391 MODULE_ALIAS_GENL_FAMILY(NBD_GENL_FAMILY_NAME);
2393 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2395 struct nlattr *dev_opt;
2399 /* This is a little racy, but for status it's ok. The
2400 * reason we don't take a ref here is because we can't
2401 * take a ref in the index == -1 case as we would need
2402 * to put under the nbd_index_mutex, which could
2403 * deadlock if we are configured to remove ourselves
2404 * once we're disconnected.
2406 if (refcount_read(&nbd->config_refs))
2408 dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
2411 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2414 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2418 nla_nest_end(reply, dev_opt);
2422 static int status_cb(int id, void *ptr, void *data)
2424 struct nbd_device *nbd = ptr;
2425 return populate_nbd_status(nbd, (struct sk_buff *)data);
2428 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2430 struct nlattr *dev_list;
2431 struct sk_buff *reply;
2437 if (info->attrs[NBD_ATTR_INDEX])
2438 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2440 mutex_lock(&nbd_index_mutex);
2442 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2443 nla_attr_size(sizeof(u8)));
2444 msg_size *= (index == -1) ? nbd_total_devices : 1;
2446 reply = genlmsg_new(msg_size, GFP_KERNEL);
2449 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2456 dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
2464 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2470 struct nbd_device *nbd;
2471 nbd = idr_find(&nbd_index_idr, index);
2473 ret = populate_nbd_status(nbd, reply);
2480 nla_nest_end(reply, dev_list);
2481 genlmsg_end(reply, reply_head);
2482 ret = genlmsg_reply(reply, info);
2484 mutex_unlock(&nbd_index_mutex);
2488 static void nbd_connect_reply(struct genl_info *info, int index)
2490 struct sk_buff *skb;
2494 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2497 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2503 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2508 genlmsg_end(skb, msg_head);
2509 genlmsg_reply(skb, info);
2512 static void nbd_mcast_index(int index)
2514 struct sk_buff *skb;
2518 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2521 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2527 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2532 genlmsg_end(skb, msg_head);
2533 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2536 static void nbd_dead_link_work(struct work_struct *work)
2538 struct link_dead_args *args = container_of(work, struct link_dead_args,
2540 nbd_mcast_index(args->index);
2544 static int __init nbd_init(void)
2548 BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
2551 pr_err("max_part must be >= 0\n");
2557 part_shift = fls(max_part);
2560 * Adjust max_part according to part_shift as it is exported
2561 * to user space so that users can know the max number of
2562 * partitions the kernel should be able to manage.
2564 * Note that -1 is required because partition 0 is reserved
2565 * for the whole disk.
2567 max_part = (1UL << part_shift) - 1;
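/*
 * Worked example with the default max_part = 16: fls(16) = 5, so
 * part_shift = 5, max_part becomes (1 << 5) - 1 = 31, and each device
 * consumes 1 << 5 = 32 minor numbers.
 */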
2570 if ((1UL << part_shift) > DISK_MAX_PARTS)
2573 if (nbds_max > 1UL << (MINORBITS - part_shift))
2576 if (register_blkdev(NBD_MAJOR, "nbd"))
2579 nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
2581 unregister_blkdev(NBD_MAJOR, "nbd");
2585 if (genl_register_family(&nbd_genl_family)) {
2586 destroy_workqueue(nbd_del_wq);
2587 unregister_blkdev(NBD_MAJOR, "nbd");
2592 for (i = 0; i < nbds_max; i++)
2597 static int nbd_exit_cb(int id, void *ptr, void *data)
2599 struct list_head *list = (struct list_head *)data;
2600 struct nbd_device *nbd = ptr;
2602 /* Skip nbd that is being removed asynchronously */
2603 if (refcount_read(&nbd->refs))
2604 list_add_tail(&nbd->list, list);
2609 static void __exit nbd_cleanup(void)
2611 struct nbd_device *nbd;
2612 LIST_HEAD(del_list);
2615 * Unregister netlink interface prior to waiting
2616 * for the completion of netlink commands.
2618 genl_unregister_family(&nbd_genl_family);
2622 mutex_lock(&nbd_index_mutex);
2623 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2624 mutex_unlock(&nbd_index_mutex);
2626 while (!list_empty(&del_list)) {
2627 nbd = list_first_entry(&del_list, struct nbd_device, list);
2628 list_del_init(&nbd->list);
2629 if (refcount_read(&nbd->config_refs))
2630 pr_err("possibly leaking nbd_config (ref %d)\n",
2631 refcount_read(&nbd->config_refs));
2632 if (refcount_read(&nbd->refs) != 1)
2633 pr_err("possibly leaking a device\n");
2637 /* Also wait until nbd_dev_remove_work() completes */
2638 destroy_workqueue(nbd_del_wq);
2640 idr_destroy(&nbd_index_idr);
2641 unregister_blkdev(NBD_MAJOR, "nbd");
2644 module_init(nbd_init);
2645 module_exit(nbd_cleanup);
2647 MODULE_DESCRIPTION("Network Block Device");
2648 MODULE_LICENSE("GPL");
2650 module_param(nbds_max, int, 0444);
2651 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2652 module_param(max_part, int, 0444);
2653 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
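/* Example (illustrative): modprobe nbd nbds_max=4 max_part=8 */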