// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Userspace block device - block device whose IO is handled from userspace
 *
 * Takes full advantage of io_uring passthrough commands for communicating
 * with the ublk userspace daemon (ublksrvd) to handle basic IO requests.
 *
 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
 *
 * (part of code stolen from loop.c)
 */
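/*
 * Rough lifecycle as wired up below (a sketch only; the authoritative
 * userspace side lives in ublksrv):
 *
 *  1) ublksrv opens /dev/ublk-control and sends ADD_DEV, SET_PARAMS and
 *     START_DEV as io_uring passthrough commands (ublk_ctrl_uring_cmd()).
 *  2) It opens the per-device char dev /dev/ublkcN and mmaps the per-queue
 *     ublksrv_io_desc arrays (ublk_ch_mmap()).
 *  3) Each queue daemon arms one FETCH_REQ per tag; the driver completes the
 *     matching cqe whenever a blk-mq request arrives, and the daemon answers
 *     with COMMIT_AND_FETCH_REQ once the IO is done.
 */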
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/sched.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/stat.h>
19 #include <linux/errno.h>
20 #include <linux/major.h>
21 #include <linux/wait.h>
22 #include <linux/blkdev.h>
23 #include <linux/init.h>
24 #include <linux/swap.h>
25 #include <linux/slab.h>
26 #include <linux/compat.h>
27 #include <linux/mutex.h>
28 #include <linux/writeback.h>
29 #include <linux/completion.h>
30 #include <linux/highmem.h>
31 #include <linux/sysfs.h>
32 #include <linux/miscdevice.h>
33 #include <linux/falloc.h>
34 #include <linux/uio.h>
35 #include <linux/ioprio.h>
36 #include <linux/sched/mm.h>
37 #include <linux/uaccess.h>
38 #include <linux/cdev.h>
39 #include <linux/io_uring.h>
40 #include <linux/blk-mq.h>
41 #include <linux/delay.h>
44 #include <linux/task_work.h>
45 #include <linux/namei.h>
46 #include <uapi/linux/ublk_cmd.h>
48 #define UBLK_MINORS (1U << MINORBITS)
50 /* All UBLK_F_* have to be included into UBLK_F_ALL */
51 #define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
52 | UBLK_F_URING_CMD_COMP_IN_TASK \
53 | UBLK_F_NEED_GET_DATA \
54 | UBLK_F_USER_RECOVERY \
55 | UBLK_F_USER_RECOVERY_REISSUE \
56 | UBLK_F_UNPRIVILEGED_DEV \
57 | UBLK_F_CMD_IOCTL_ENCODE)
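/*
 * Flags in UBLK_F_ALL are negotiated with userspace: bits the driver doesn't
 * support are masked out in ublk_ctrl_add_dev() and the resulting 64-bit
 * flags are copied back to ublksrv, so both sides agree on the feature set.
 */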
59 /* All UBLK_PARAM_TYPE_* should be included here */
60 #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
61 UBLK_PARAM_TYPE_DISCARD | UBLK_PARAM_TYPE_DEVT)
64 struct llist_node node;
65 struct callback_head work;
68 struct ublk_uring_cmd_pdu {
69 struct ublk_queue *ubq;
 * io command is active: sqe cmd is received, and its cqe isn't done
 * If the flag is set, the io command is owned by the ublk driver and is
 * waiting for an incoming blk-mq request from the ublk block device.
 * If the flag is cleared, the io command will be completed, and owned by
 * ublksrv.
81 #define UBLK_IO_FLAG_ACTIVE 0x01
 * IO command is completed via cqe, and it is being handled by ublksrv, but
 * not committed back to the driver yet.
 * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE.
90 #define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
93 * IO command is aborted, so this flag is set in case of
94 * !UBLK_IO_FLAG_ACTIVE.
96 * After this flag is observed, any pending or new incoming request
97 * associated with this io command will be failed immediately
99 #define UBLK_IO_FLAG_ABORTED 0x04
 * UBLK_IO_FLAG_NEED_GET_DATA is set when the IO command needs to get the
 * data buffer address from ublksrv.
 * Then bio data can be copied into this data buffer for a WRITE request
 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
108 #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
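/*
 * Rough per-tag flag transitions, summarizing the definitions above:
 *
 *   FETCH_REQ            -> ACTIVE
 *   blk-mq request       -> OWNED_BY_SRV (ACTIVE cleared), cqe sent to ublksrv
 *   COMMIT_AND_FETCH_REQ -> ACTIVE again, blk-mq request completed
 *   daemon dies          -> ABORTED, pending/new requests are failed
 */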
111 /* userspace buffer address from io cmd */
116 struct io_uring_cmd *cmd;
124 struct task_struct *ubq_daemon;
127 struct llist_head io_cmds;
129 unsigned long io_addr; /* mapped vm address */
130 unsigned int max_io_sz;
133 unsigned short nr_io_ready; /* how many ios setup */
134 struct ublk_device *dev;
135 struct ublk_io ios[];
138 #define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ)
141 struct gendisk *ub_disk;
145 unsigned int queue_size;
146 struct ublksrv_ctrl_dev_info dev_info;
148 struct blk_mq_tag_set tag_set;
151 struct device cdev_dev;
153 #define UB_STATE_OPEN 0
154 #define UB_STATE_USED 1
155 #define UB_STATE_DELETED 2
162 struct mm_struct *mm;
164 struct ublk_params params;
166 struct completion completion;
167 unsigned int nr_queues_ready;
168 unsigned int nr_privileged_daemon;
171 * Our ubq->daemon may be killed without any notification, so
172 * monitor each queue's daemon periodically
174 struct delayed_work monitor_work;
175 struct work_struct quiesce_work;
176 struct work_struct stop_work;
179 /* header of ublk_params */
180 struct ublk_params_header {
185 static dev_t ublk_chr_devt;
186 static struct class *ublk_chr_class;
188 static DEFINE_IDR(ublk_index_idr);
189 static DEFINE_SPINLOCK(ublk_idr_lock);
190 static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
192 static DEFINE_MUTEX(ublk_ctl_mutex);
 * Max ublk devices allowed to add
 * It can be extended to a per-user limit in the future, or even controlled
 * by cgroup.
200 static unsigned int ublks_max = 64;
201 static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
203 static struct miscdevice ublk_misc;
205 static void ublk_dev_param_basic_apply(struct ublk_device *ub)
207 struct request_queue *q = ub->ub_disk->queue;
208 const struct ublk_param_basic *p = &ub->params.basic;
210 blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
211 blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
212 blk_queue_io_min(q, 1 << p->io_min_shift);
213 blk_queue_io_opt(q, 1 << p->io_opt_shift);
215 blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
216 p->attrs & UBLK_ATTR_FUA);
217 if (p->attrs & UBLK_ATTR_ROTATIONAL)
218 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
220 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
222 blk_queue_max_hw_sectors(q, p->max_sectors);
223 blk_queue_chunk_sectors(q, p->chunk_sectors);
224 blk_queue_virt_boundary(q, p->virt_boundary_mask);
226 if (p->attrs & UBLK_ATTR_READ_ONLY)
227 set_disk_ro(ub->ub_disk, true);
229 set_capacity(ub->ub_disk, p->dev_sectors);
232 static void ublk_dev_param_discard_apply(struct ublk_device *ub)
234 struct request_queue *q = ub->ub_disk->queue;
235 const struct ublk_param_discard *p = &ub->params.discard;
237 q->limits.discard_alignment = p->discard_alignment;
238 q->limits.discard_granularity = p->discard_granularity;
239 blk_queue_max_discard_sectors(q, p->max_discard_sectors);
240 blk_queue_max_write_zeroes_sectors(q,
241 p->max_write_zeroes_sectors);
242 blk_queue_max_discard_segments(q, p->max_discard_segments);
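/*
 * The parameters applied above come from userspace via UBLK_CMD_SET_PARAMS
 * (ublk_ctrl_set_params()); they are sanity-checked by ublk_validate_params()
 * below when set, and applied by ublk_apply_params() when the device is
 * started.
 */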
245 static int ublk_validate_params(const struct ublk_device *ub)
247 /* basic param is the only one which must be set */
248 if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
249 const struct ublk_param_basic *p = &ub->params.basic;
251 if (p->logical_bs_shift > PAGE_SHIFT)
254 if (p->logical_bs_shift > p->physical_bs_shift)
257 if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
262 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
263 const struct ublk_param_discard *p = &ub->params.discard;
/* So far, only single-segment discard is supported */
266 if (p->max_discard_sectors && p->max_discard_segments != 1)
269 if (!p->discard_granularity)
273 /* dev_t is read-only */
274 if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
280 static int ublk_apply_params(struct ublk_device *ub)
282 if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
285 ublk_dev_param_basic_apply(ub);
287 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
288 ublk_dev_param_discard_apply(ub);
293 static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
295 if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
296 !(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK))
301 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
303 return ubq->flags & UBLK_F_NEED_GET_DATA;
306 static struct ublk_device *ublk_get_device(struct ublk_device *ub)
308 if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
313 static void ublk_put_device(struct ublk_device *ub)
315 put_device(&ub->cdev_dev);
318 static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
321 return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
324 static inline bool ublk_rq_has_data(const struct request *rq)
326 return bio_has_data(rq->bio);
329 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
332 return (struct ublksrv_io_desc *)
333 &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
336 static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
338 return ublk_get_queue(ub, q_id)->io_cmd_buf;
341 static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
343 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
345 return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
349 static inline bool ublk_queue_can_use_recovery_reissue(
350 struct ublk_queue *ubq)
352 return (ubq->flags & UBLK_F_USER_RECOVERY) &&
353 (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
356 static inline bool ublk_queue_can_use_recovery(
357 struct ublk_queue *ubq)
359 return ubq->flags & UBLK_F_USER_RECOVERY;
362 static inline bool ublk_can_use_recovery(struct ublk_device *ub)
364 return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
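/*
 * UBLK_F_USER_RECOVERY keeps the device alive (quiesced) when a queue daemon
 * dies, so a new daemon can re-attach via START/END_USER_RECOVERY. With
 * UBLK_F_USER_RECOVERY_REISSUE, outstanding requests are requeued and
 * reissued to the new daemon instead of being failed with an error.
 */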
367 static void ublk_free_disk(struct gendisk *disk)
369 struct ublk_device *ub = disk->private_data;
371 clear_bit(UB_STATE_USED, &ub->state);
372 put_device(&ub->cdev_dev);
375 static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
376 unsigned int *owner_gid)
381 current_uid_gid(&uid, &gid);
383 *owner_uid = from_kuid(&init_user_ns, uid);
384 *owner_gid = from_kgid(&init_user_ns, gid);
387 static int ublk_open(struct block_device *bdev, fmode_t mode)
389 struct ublk_device *ub = bdev->bd_disk->private_data;
391 if (capable(CAP_SYS_ADMIN))
 * If it is an unprivileged device, only the owner can open
 * the disk. Otherwise it could be a trap set by a malicious
 * user who deliberately grants this disk's privileges to
 * other users.
 *
 * This is also reasonable given that anyone can create an
 * unprivileged device without needing anyone else's grant.
403 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
404 unsigned int curr_uid, curr_gid;
406 ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
408 if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
409 ub->dev_info.owner_gid)
416 static const struct block_device_operations ub_fops = {
417 .owner = THIS_MODULE,
419 .free_disk = ublk_free_disk,
422 #define UBLK_MAX_PIN_PAGES 32
424 struct ublk_map_data {
425 const struct request *rq;
430 struct ublk_io_iter {
431 struct page *pages[UBLK_MAX_PIN_PAGES];
432 unsigned pg_off; /* offset in the 1st page in pages */
433 int nr_pages; /* how many page pointers in pages */
435 struct bvec_iter iter;
438 static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
439 unsigned max_bytes, bool to_vm)
441 const unsigned total = min_t(unsigned, max_bytes,
442 PAGE_SIZE - data->pg_off +
443 ((data->nr_pages - 1) << PAGE_SHIFT));
447 while (done < total) {
448 struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
449 const unsigned int bytes = min3(bv.bv_len, total - done,
450 (unsigned)(PAGE_SIZE - data->pg_off));
451 void *bv_buf = bvec_kmap_local(&bv);
452 void *pg_buf = kmap_local_page(data->pages[pg_idx]);
455 memcpy(pg_buf + data->pg_off, bv_buf, bytes);
457 memcpy(bv_buf, pg_buf + data->pg_off, bytes);
459 kunmap_local(pg_buf);
460 kunmap_local(bv_buf);
462 /* advance page array */
463 data->pg_off += bytes;
464 if (data->pg_off == PAGE_SIZE) {
472 bio_advance_iter_single(data->bio, &data->iter, bytes);
473 if (!data->iter.bi_size) {
474 data->bio = data->bio->bi_next;
475 if (data->bio == NULL)
477 data->iter = data->bio->bi_iter;
484 static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
486 const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
487 const unsigned long start_vm = data->ubuf;
488 unsigned int done = 0;
489 struct ublk_io_iter iter = {
490 .pg_off = start_vm & (PAGE_SIZE - 1),
491 .bio = data->rq->bio,
492 .iter = data->rq->bio->bi_iter,
494 const unsigned int nr_pages = round_up(data->len +
495 (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
497 while (done < nr_pages) {
498 const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
502 iter.nr_pages = get_user_pages_fast(start_vm +
503 (done << PAGE_SHIFT), to_pin, gup_flags,
505 if (iter.nr_pages <= 0)
506 return done == 0 ? iter.nr_pages : done;
507 len = ublk_copy_io_pages(&iter, data->len, to_vm);
508 for (i = 0; i < iter.nr_pages; i++) {
510 set_page_dirty(iter.pages[i]);
511 put_page(iter.pages[i]);
514 done += iter.nr_pages;
520 static inline bool ublk_need_map_req(const struct request *req)
522 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
525 static inline bool ublk_need_unmap_req(const struct request *req)
527 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_READ;
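/*
 * Without zero copy, data moves through the ublksrv buffer at io->addr:
 * WRITE payloads are copied into it before the io command is completed
 * (ublk_map_io()), and READ payloads are copied back out of it when the
 * request is completed (ublk_unmap_io()).
 */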
530 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
533 const unsigned int rq_bytes = blk_rq_bytes(req);
 * no zero copy, we delay copying WRITE request data into the ublksrv
 * context, and the big benefit is that pinning pages in the current
 * context is pretty fast, see ublk_copy_user_pages()
540 if (ublk_need_map_req(req)) {
541 struct ublk_map_data data = {
547 ublk_copy_user_pages(&data, true);
549 return rq_bytes - data.len;
554 static int ublk_unmap_io(const struct ublk_queue *ubq,
555 const struct request *req,
558 const unsigned int rq_bytes = blk_rq_bytes(req);
560 if (ublk_need_unmap_req(req)) {
561 struct ublk_map_data data = {
567 WARN_ON_ONCE(io->res > rq_bytes);
569 ublk_copy_user_pages(&data, false);
571 return io->res - data.len;
576 static inline unsigned int ublk_req_build_flags(struct request *req)
580 if (req->cmd_flags & REQ_FAILFAST_DEV)
581 flags |= UBLK_IO_F_FAILFAST_DEV;
583 if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
584 flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
586 if (req->cmd_flags & REQ_FAILFAST_DRIVER)
587 flags |= UBLK_IO_F_FAILFAST_DRIVER;
589 if (req->cmd_flags & REQ_META)
590 flags |= UBLK_IO_F_META;
592 if (req->cmd_flags & REQ_FUA)
593 flags |= UBLK_IO_F_FUA;
595 if (req->cmd_flags & REQ_NOUNMAP)
596 flags |= UBLK_IO_F_NOUNMAP;
598 if (req->cmd_flags & REQ_SWAP)
599 flags |= UBLK_IO_F_SWAP;
604 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
606 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
607 struct ublk_io *io = &ubq->ios[req->tag];
610 switch (req_op(req)) {
612 ublk_op = UBLK_IO_OP_READ;
615 ublk_op = UBLK_IO_OP_WRITE;
618 ublk_op = UBLK_IO_OP_FLUSH;
621 ublk_op = UBLK_IO_OP_DISCARD;
623 case REQ_OP_WRITE_ZEROES:
624 ublk_op = UBLK_IO_OP_WRITE_ZEROES;
627 return BLK_STS_IOERR;
/* translate the op and flags into ublk ABI values, since kernel-internal ones may change */
631 iod->op_flags = ublk_op | ublk_req_build_flags(req);
632 iod->nr_sectors = blk_rq_sectors(req);
633 iod->start_sector = blk_rq_pos(req);
634 iod->addr = io->addr;
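/*
 * The iod filled in above lives in the per-queue command buffer that ublksrv
 * mmaps via /dev/ublkcN (see ublk_ch_mmap()), so the daemon reads the request
 * description directly from shared memory once its cqe arrives.
 */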
639 static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
640 struct io_uring_cmd *ioucmd)
642 return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
645 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
647 return ubq->ubq_daemon->flags & PF_EXITING;
650 /* todo: handle partial completion */
651 static void ublk_complete_rq(struct request *req)
653 struct ublk_queue *ubq = req->mq_hctx->driver_data;
654 struct ublk_io *io = &ubq->ios[req->tag];
655 unsigned int unmapped_bytes;
656 blk_status_t res = BLK_STS_OK;
/* fail the read IO if nothing was read */
659 if (!io->res && req_op(req) == REQ_OP_READ)
663 res = errno_to_blk_status(io->res);
 * FLUSH, DISCARD or WRITE_ZEROES usually won't return any payload bytes, so
 * end them directly; none of them needs unmapping.
673 if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE)
/* for a READ request, copy the data at iod->addr back into the rq buffers */
677 unmapped_bytes = ublk_unmap_io(ubq, req, io);
 * Extremely unlikely, since we got the data filled in just before.
 * Simply re-read for this unlikely case.
684 if (unlikely(unmapped_bytes < io->res))
685 io->res = unmapped_bytes;
687 if (blk_update_request(req, BLK_STS_OK, io->res))
688 blk_mq_requeue_request(req, true);
690 __blk_mq_end_request(req, BLK_STS_OK);
694 blk_mq_end_request(req, res);
698 * Since __ublk_rq_task_work always fails requests immediately during
699 * exiting, __ublk_fail_req() is only called from abort context during
700 * exiting. So lock is unnecessary.
 * Also, aborting may not have started yet; keep in mind that one failed
 * request may be issued by the block layer again.
705 static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
708 WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
710 if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
711 io->flags |= UBLK_IO_FLAG_ABORTED;
712 if (ublk_queue_can_use_recovery_reissue(ubq))
713 blk_mq_requeue_request(req, false);
715 blk_mq_end_request(req, BLK_STS_IOERR);
719 static void ubq_complete_io_cmd(struct ublk_io *io, int res)
721 /* mark this cmd owned by ublksrv */
722 io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
 * clear ACTIVE since we are done with this sqe/cmd slot
 * We can only accept a new io cmd for this slot when it is not active.
728 io->flags &= ~UBLK_IO_FLAG_ACTIVE;
730 /* tell ublksrv one io request is coming */
731 io_uring_cmd_done(io->cmd, res, 0);
734 #define UBLK_REQUEUE_DELAY_MS 3
736 static inline void __ublk_abort_rq(struct ublk_queue *ubq,
739 /* We cannot process this rq so just requeue it. */
740 if (ublk_queue_can_use_recovery(ubq))
741 blk_mq_requeue_request(rq, false);
743 blk_mq_end_request(rq, BLK_STS_IOERR);
745 mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
748 static inline void __ublk_rq_task_work(struct request *req)
750 struct ublk_queue *ubq = req->mq_hctx->driver_data;
752 struct ublk_io *io = &ubq->ios[tag];
753 unsigned int mapped_bytes;
755 pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
756 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
757 ublk_get_iod(ubq, req->tag)->addr);
760 * Task is exiting if either:
762 * (1) current != ubq_daemon.
763 * io_uring_cmd_complete_in_task() tries to run task_work
764 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
766 * (2) current->flags & PF_EXITING.
768 if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
769 __ublk_abort_rq(ubq, req);
773 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
779 if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
780 io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
781 pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
782 __func__, io->cmd->cmd_op, ubq->q_id,
783 req->tag, io->flags);
784 ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
 * We have handled UBLK_IO_NEED_GET_DATA command,
 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just do the copy work.
792 io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
793 /* update iod->addr because ublksrv may have passed a new io buffer */
794 ublk_get_iod(ubq, req->tag)->addr = io->addr;
795 pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
796 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
797 ublk_get_iod(ubq, req->tag)->addr);
800 mapped_bytes = ublk_map_io(ubq, req, io);
802 /* partially mapped, update io descriptor */
803 if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
805 * Nothing mapped, retry until we succeed.
807 * We may never succeed in mapping any bytes here because
 * of OOM. TODO: reserve one buffer with a single page pinned to
 * provide a forward progress guarantee.
811 if (unlikely(!mapped_bytes)) {
812 blk_mq_requeue_request(req, false);
813 blk_mq_delay_kick_requeue_list(req->q,
814 UBLK_REQUEUE_DELAY_MS);
818 ublk_get_iod(ubq, req->tag)->nr_sectors =
822 ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
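/*
 * Requests are first queued on ubq->io_cmds (a lockless llist) from
 * ublk_queue_rq() context, then drained below in the daemon's task context,
 * so __ublk_rq_task_work() normally runs with current == ubq->ubq_daemon.
 */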
825 static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
827 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
828 struct ublk_rq_data *data, *tmp;
830 io_cmds = llist_reverse_order(io_cmds);
831 llist_for_each_entry_safe(data, tmp, io_cmds, node)
832 __ublk_rq_task_work(blk_mq_rq_from_pdu(data));
835 static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
837 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
838 struct ublk_rq_data *data, *tmp;
840 llist_for_each_entry_safe(data, tmp, io_cmds, node)
841 __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
844 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
846 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
847 struct ublk_queue *ubq = pdu->ubq;
849 ublk_forward_io_cmds(ubq);
852 static void ublk_rq_task_work_fn(struct callback_head *work)
854 struct ublk_rq_data *data = container_of(work,
855 struct ublk_rq_data, work);
856 struct request *req = blk_mq_rq_from_pdu(data);
857 struct ublk_queue *ubq = req->mq_hctx->driver_data;
859 ublk_forward_io_cmds(ubq);
862 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
864 struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
867 if (!llist_add(&data->node, &ubq->io_cmds))
870 io = &ubq->ios[rq->tag];
 * If the check passes, we know that this is a re-issued request aborted
 * previously in monitor_work because the ubq_daemon (cmd's task) is
 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
 * because this ioucmd's io_uring context may be freed now if no inflight
 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
 *
 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request
 * (releasing the tag). Then the request is re-started (allocating the tag)
 * and we are here. Since releasing/allocating a tag implies smp_mb(),
 * finding UBLK_IO_FLAG_ABORTED guarantees that this is a re-issued request
 * aborted previously.
883 if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
884 ublk_abort_io_cmds(ubq);
885 } else if (ublk_can_use_task_work(ubq)) {
886 if (task_work_add(ubq->ubq_daemon, &data->work,
888 ublk_abort_io_cmds(ubq);
890 struct io_uring_cmd *cmd = io->cmd;
891 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
894 io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
898 static enum blk_eh_timer_return ublk_timeout(struct request *rq)
900 struct ublk_queue *ubq = rq->mq_hctx->driver_data;
902 if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
904 send_sig(SIGKILL, ubq->ubq_daemon, 0);
911 return BLK_EH_RESET_TIMER;
914 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
915 const struct blk_mq_queue_data *bd)
917 struct ublk_queue *ubq = hctx->driver_data;
918 struct request *rq = bd->rq;
921 /* fill iod to slot in io cmd buffer */
922 res = ublk_setup_iod(ubq, rq);
923 if (unlikely(res != BLK_STS_OK))
924 return BLK_STS_IOERR;
/* With recovery feature enabled, force_abort is set in
 * ublk_stop_dev() before calling del_gendisk(). We have to
 * abort all requeued and new rqs here to let del_gendisk()
 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
 * here, to avoid a UAF on the io_uring ctx.
 *
 * Note: force_abort is guaranteed to be seen because it is set
 * before the request queue is unquiesced.
935 if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
936 return BLK_STS_IOERR;
938 blk_mq_start_request(bd->rq);
940 if (unlikely(ubq_daemon_is_dying(ubq))) {
941 __ublk_abort_rq(ubq, rq);
945 ublk_queue_cmd(ubq, rq);
950 static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
951 unsigned int hctx_idx)
953 struct ublk_device *ub = driver_data;
954 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
956 hctx->driver_data = ubq;
960 static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
961 unsigned int hctx_idx, unsigned int numa_node)
963 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
965 init_task_work(&data->work, ublk_rq_task_work_fn);
969 static const struct blk_mq_ops ublk_mq_ops = {
970 .queue_rq = ublk_queue_rq,
971 .init_hctx = ublk_init_hctx,
972 .init_request = ublk_init_rq,
973 .timeout = ublk_timeout,
976 static int ublk_ch_open(struct inode *inode, struct file *filp)
978 struct ublk_device *ub = container_of(inode->i_cdev,
979 struct ublk_device, cdev);
981 if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
983 filp->private_data = ub;
987 static int ublk_ch_release(struct inode *inode, struct file *filp)
989 struct ublk_device *ub = filp->private_data;
991 clear_bit(UB_STATE_OPEN, &ub->state);
995 /* map pre-allocated per-queue cmd buffer to ublksrv daemon */
996 static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
998 struct ublk_device *ub = filp->private_data;
999 size_t sz = vma->vm_end - vma->vm_start;
1000 unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
1001 unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
1004 spin_lock(&ub->mm_lock);
1006 ub->mm = current->mm;
1007 if (current->mm != ub->mm)
1009 spin_unlock(&ub->mm_lock);
1014 if (vma->vm_flags & VM_WRITE)
1017 end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
1018 if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
1021 q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
1022 pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
1023 __func__, q_id, current->pid, vma->vm_start,
1024 phys_off, (unsigned long)sz);
1026 if (sz != ublk_queue_cmd_buf_size(ub, q_id))
1029 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
1030 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
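/*
 * Offset layout handled above: queue q_id's descriptor array is mapped at
 * UBLKSRV_CMD_BUF_OFFSET + q_id * max_sz, where max_sz is
 * UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc), and the mapping
 * length must match ublk_queue_cmd_buf_size().
 */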
1033 static void ublk_commit_completion(struct ublk_device *ub,
1034 struct ublksrv_io_cmd *ub_cmd)
1036 u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
1037 struct ublk_queue *ubq = ublk_get_queue(ub, qid);
1038 struct ublk_io *io = &ubq->ios[tag];
1039 struct request *req;
/* now this cmd slot is owned by the ublk driver */
1042 io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
1043 io->res = ub_cmd->result;
/* find the io request and complete it */
1046 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
1048 if (req && likely(!blk_should_fake_timeout(req->q)))
1049 ublk_complete_rq(req);
 * When ->ubq_daemon is exiting, either new requests are ended immediately,
 * or any queued io command is drained, so it is safe to abort the queue
 * locklessly.
1057 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
1061 if (!ublk_get_device(ub))
1064 for (i = 0; i < ubq->q_depth; i++) {
1065 struct ublk_io *io = &ubq->ios[i];
1067 if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
1071 * Either we fail the request or ublk_rq_task_work_fn
1074 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1076 __ublk_fail_req(ubq, io, rq);
1079 ublk_put_device(ub);
1082 static void ublk_daemon_monitor_work(struct work_struct *work)
1084 struct ublk_device *ub =
1085 container_of(work, struct ublk_device, monitor_work.work);
1088 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
1089 struct ublk_queue *ubq = ublk_get_queue(ub, i);
1091 if (ubq_daemon_is_dying(ubq)) {
1092 if (ublk_queue_can_use_recovery(ubq))
1093 schedule_work(&ub->quiesce_work);
1095 schedule_work(&ub->stop_work);
/* aborting the queue is for making forward progress */
1098 ublk_abort_queue(ub, ubq);
 * We can't schedule monitor work once ub's state is no longer UBLK_S_DEV_LIVE,
 * i.e. after ublk_remove() or __ublk_quiesce_dev() has started.
 *
 * No need for ub->mutex: monitor work is canceled after the state is marked
 * as not LIVE, so the new state is observed reliably.
1109 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1110 schedule_delayed_work(&ub->monitor_work,
1111 UBLK_DAEMON_MONITOR_PERIOD);
1114 static inline bool ublk_queue_ready(struct ublk_queue *ubq)
1116 return ubq->nr_io_ready == ubq->q_depth;
1119 static void ublk_cancel_queue(struct ublk_queue *ubq)
1123 if (!ublk_queue_ready(ubq))
1126 for (i = 0; i < ubq->q_depth; i++) {
1127 struct ublk_io *io = &ubq->ios[i];
1129 if (io->flags & UBLK_IO_FLAG_ACTIVE)
1130 io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
1133 /* all io commands are canceled */
1134 ubq->nr_io_ready = 0;
1137 /* Cancel all pending commands, must be called after del_gendisk() returns */
1138 static void ublk_cancel_dev(struct ublk_device *ub)
1142 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1143 ublk_cancel_queue(ublk_get_queue(ub, i));
1146 static bool ublk_check_inflight_rq(struct request *rq, void *data)
1150 if (blk_mq_request_started(rq)) {
1157 static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
1161 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
1164 blk_mq_tagset_busy_iter(&ub->tag_set,
1165 ublk_check_inflight_rq, &idle);
1168 msleep(UBLK_REQUEUE_DELAY_MS);
1172 static void __ublk_quiesce_dev(struct ublk_device *ub)
1174 pr_devel("%s: quiesce ub: dev_id %d state %s\n",
1175 __func__, ub->dev_info.dev_id,
1176 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1177 "LIVE" : "QUIESCED");
1178 blk_mq_quiesce_queue(ub->ub_disk->queue);
1179 ublk_wait_tagset_rqs_idle(ub);
1180 ub->dev_info.state = UBLK_S_DEV_QUIESCED;
1181 ublk_cancel_dev(ub);
/* we are going to release the task_struct of ubq_daemon and reset
 * ->ubq_daemon to NULL. So in monitor_work, checking ubq_daemon would cause
 * a UAF. Besides, monitor_work is not necessary in the QUIESCED state since
 * we have already scheduled quiesce_work and quiesced all ubqs.
 *
 * Do not let monitor_work schedule itself if the state is QUIESCED. Cancel
 * it here and re-schedule it in END_USER_RECOVERY to avoid the UAF.
1190 cancel_delayed_work_sync(&ub->monitor_work);
1193 static void ublk_quiesce_work_fn(struct work_struct *work)
1195 struct ublk_device *ub =
1196 container_of(work, struct ublk_device, quiesce_work);
1198 mutex_lock(&ub->mutex);
1199 if (ub->dev_info.state != UBLK_S_DEV_LIVE)
1201 __ublk_quiesce_dev(ub);
1203 mutex_unlock(&ub->mutex);
1206 static void ublk_unquiesce_dev(struct ublk_device *ub)
1210 pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
1211 __func__, ub->dev_info.dev_id,
1212 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1213 "LIVE" : "QUIESCED");
/* quiesce_work has run. We let requeued rqs be aborted
 * before running fallback_wq. "force_abort" must be seen
 * after the request queue is unquiesced. Then del_gendisk() can move on.
1219 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1220 ublk_get_queue(ub, i)->force_abort = true;
1222 blk_mq_unquiesce_queue(ub->ub_disk->queue);
1223 /* We may have requeued some rqs in ublk_quiesce_queue() */
1224 blk_mq_kick_requeue_list(ub->ub_disk->queue);
1227 static void ublk_stop_dev(struct ublk_device *ub)
1229 mutex_lock(&ub->mutex);
1230 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1232 if (ublk_can_use_recovery(ub)) {
1233 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1234 __ublk_quiesce_dev(ub);
1235 ublk_unquiesce_dev(ub);
1237 del_gendisk(ub->ub_disk);
1238 ub->dev_info.state = UBLK_S_DEV_DEAD;
1239 ub->dev_info.ublksrv_pid = -1;
1240 put_disk(ub->ub_disk);
1243 ublk_cancel_dev(ub);
1244 mutex_unlock(&ub->mutex);
1245 cancel_delayed_work_sync(&ub->monitor_work);
1248 /* device can only be started after all IOs are ready */
1249 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1251 mutex_lock(&ub->mutex);
1253 if (ublk_queue_ready(ubq)) {
1254 ubq->ubq_daemon = current;
1255 get_task_struct(ubq->ubq_daemon);
1256 ub->nr_queues_ready++;
1258 if (capable(CAP_SYS_ADMIN))
1259 ub->nr_privileged_daemon++;
1261 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
1262 complete_all(&ub->completion);
1263 mutex_unlock(&ub->mutex);
1266 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
1269 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1270 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1272 ublk_queue_cmd(ubq, req);
1275 static inline int ublk_check_cmd_op(u32 cmd_op)
1277 u32 ioc_type = _IOC_TYPE(cmd_op);
1279 if (IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
1282 if (ioc_type != 'u' && ioc_type != 0)
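/*
 * ublk_ch_uring_cmd() below is the per-IO command path on /dev/ublkcN. The
 * three data-plane commands are FETCH_REQ (arm a tag for the first time),
 * COMMIT_AND_FETCH_REQ (commit the result of the previous request and re-arm
 * the tag) and NEED_GET_DATA (provide a buffer so the driver can copy WRITE
 * data into it before completing the io command).
 */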
1288 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
1290 struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
1291 struct ublk_device *ub = cmd->file->private_data;
1292 struct ublk_queue *ubq;
1294 u32 cmd_op = cmd->cmd_op;
1295 unsigned tag = ub_cmd->tag;
1297 struct request *req;
1299 pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
1300 __func__, cmd->cmd_op, ub_cmd->q_id, tag,
1303 if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
1306 ubq = ublk_get_queue(ub, ub_cmd->q_id);
1307 if (!ubq || ub_cmd->q_id != ubq->q_id)
1310 if (ubq->ubq_daemon && ubq->ubq_daemon != current)
1313 if (tag >= ubq->q_depth)
1316 io = &ubq->ios[tag];
1318 /* there is pending io cmd, something must be wrong */
1319 if (io->flags & UBLK_IO_FLAG_ACTIVE) {
 * ensure that the user issues UBLK_IO_NEED_GET_DATA
 * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
1328 if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
1329 ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
1332 ret = ublk_check_cmd_op(cmd_op);
1337 switch (_IOC_NR(cmd_op)) {
1338 case UBLK_IO_FETCH_REQ:
/* UBLK_IO_FETCH_REQ is only allowed before the queue is set up */
1340 if (ublk_queue_ready(ubq)) {
 * The io is being handled by the server, so COMMIT_AND_FETCH_REQ is
 * expected instead of FETCH_REQ
1348 if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
/* FETCH_REQ has to provide an IO buffer if NEED_GET_DATA is not enabled */
1351 if (!ub_cmd->addr && !ublk_need_get_data(ubq))
1354 io->flags |= UBLK_IO_FLAG_ACTIVE;
1355 io->addr = ub_cmd->addr;
1357 ublk_mark_io_ready(ub, ubq);
1359 case UBLK_IO_COMMIT_AND_FETCH_REQ:
1360 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
 * COMMIT_AND_FETCH_REQ has to provide an IO buffer if NEED_GET_DATA is
 * not enabled or it is a READ IO.
1365 if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
1367 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1369 io->addr = ub_cmd->addr;
1370 io->flags |= UBLK_IO_FLAG_ACTIVE;
1372 ublk_commit_completion(ub, ub_cmd);
1374 case UBLK_IO_NEED_GET_DATA:
1375 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1377 io->addr = ub_cmd->addr;
1379 io->flags |= UBLK_IO_FLAG_ACTIVE;
1380 ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
1385 return -EIOCBQUEUED;
1388 io_uring_cmd_done(cmd, ret, 0);
1389 pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
1390 __func__, cmd_op, tag, ret, io->flags);
1391 return -EIOCBQUEUED;
1394 static const struct file_operations ublk_ch_fops = {
1395 .owner = THIS_MODULE,
1396 .open = ublk_ch_open,
1397 .release = ublk_ch_release,
1398 .llseek = no_llseek,
1399 .uring_cmd = ublk_ch_uring_cmd,
1400 .mmap = ublk_ch_mmap,
1403 static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
1405 int size = ublk_queue_cmd_buf_size(ub, q_id);
1406 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1408 if (ubq->ubq_daemon)
1409 put_task_struct(ubq->ubq_daemon);
1410 if (ubq->io_cmd_buf)
1411 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
1414 static int ublk_init_queue(struct ublk_device *ub, int q_id)
1416 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1417 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
1421 ubq->flags = ub->dev_info.flags;
1423 ubq->q_depth = ub->dev_info.queue_depth;
1424 size = ublk_queue_cmd_buf_size(ub, q_id);
1426 ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
1430 ubq->io_cmd_buf = ptr;
1435 static void ublk_deinit_queues(struct ublk_device *ub)
1437 int nr_queues = ub->dev_info.nr_hw_queues;
1443 for (i = 0; i < nr_queues; i++)
1444 ublk_deinit_queue(ub, i);
1445 kfree(ub->__queues);
1448 static int ublk_init_queues(struct ublk_device *ub)
1450 int nr_queues = ub->dev_info.nr_hw_queues;
1451 int depth = ub->dev_info.queue_depth;
1452 int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
1453 int i, ret = -ENOMEM;
1455 ub->queue_size = ubq_size;
1456 ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
1460 for (i = 0; i < nr_queues; i++) {
1461 if (ublk_init_queue(ub, i))
1465 init_completion(&ub->completion);
1469 ublk_deinit_queues(ub);
1473 static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
1478 spin_lock(&ublk_idr_lock);
/* allocate id; if @idx >= 0, we're requesting that specific id */
1481 err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
1485 err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
1487 spin_unlock(&ublk_idr_lock);
1490 ub->ub_number = err;
1495 static void ublk_free_dev_number(struct ublk_device *ub)
1497 spin_lock(&ublk_idr_lock);
1498 idr_remove(&ublk_index_idr, ub->ub_number);
1499 wake_up_all(&ublk_idr_wq);
1500 spin_unlock(&ublk_idr_lock);
1503 static void ublk_cdev_rel(struct device *dev)
1505 struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
1507 blk_mq_free_tag_set(&ub->tag_set);
1508 ublk_deinit_queues(ub);
1509 ublk_free_dev_number(ub);
1510 mutex_destroy(&ub->mutex);
1514 static int ublk_add_chdev(struct ublk_device *ub)
1516 struct device *dev = &ub->cdev_dev;
1517 int minor = ub->ub_number;
1520 dev->parent = ublk_misc.this_device;
1521 dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
1522 dev->class = ublk_chr_class;
1523 dev->release = ublk_cdev_rel;
1524 device_initialize(dev);
1526 ret = dev_set_name(dev, "ublkc%d", minor);
1530 cdev_init(&ub->cdev, &ublk_ch_fops);
1531 ret = cdev_device_add(&ub->cdev, dev);
1542 static void ublk_stop_work_fn(struct work_struct *work)
1544 struct ublk_device *ub =
1545 container_of(work, struct ublk_device, stop_work);
1550 /* align max io buffer size with PAGE_SIZE */
1551 static void ublk_align_max_io_size(struct ublk_device *ub)
1553 unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
1555 ub->dev_info.max_io_buf_bytes =
1556 round_down(max_io_bytes, PAGE_SIZE);
1559 static int ublk_add_tag_set(struct ublk_device *ub)
1561 ub->tag_set.ops = &ublk_mq_ops;
1562 ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
1563 ub->tag_set.queue_depth = ub->dev_info.queue_depth;
1564 ub->tag_set.numa_node = NUMA_NO_NODE;
1565 ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
1566 ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1567 ub->tag_set.driver_data = ub;
1568 return blk_mq_alloc_tag_set(&ub->tag_set);
1571 static void ublk_remove(struct ublk_device *ub)
1574 cancel_work_sync(&ub->stop_work);
1575 cancel_work_sync(&ub->quiesce_work);
1576 cdev_device_del(&ub->cdev, &ub->cdev_dev);
1577 put_device(&ub->cdev_dev);
1581 static struct ublk_device *ublk_get_device_from_id(int idx)
1583 struct ublk_device *ub = NULL;
1588 spin_lock(&ublk_idr_lock);
1589 ub = idr_find(&ublk_index_idr, idx);
1591 ub = ublk_get_device(ub);
1592 spin_unlock(&ublk_idr_lock);
1597 static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
1599 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1600 int ublksrv_pid = (int)header->data[0];
1601 struct gendisk *disk;
1604 if (ublksrv_pid <= 0)
1607 wait_for_completion_interruptible(&ub->completion);
1609 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
1611 mutex_lock(&ub->mutex);
1612 if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
1613 test_bit(UB_STATE_USED, &ub->state)) {
1618 disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
1620 ret = PTR_ERR(disk);
1623 sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
1624 disk->fops = &ub_fops;
1625 disk->private_data = ub;
1627 ub->dev_info.ublksrv_pid = ublksrv_pid;
1630 ret = ublk_apply_params(ub);
/* don't probe partitions if any ubq daemon is untrusted */
1635 if (ub->nr_privileged_daemon != ub->nr_queues_ready)
1636 set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
1638 get_device(&ub->cdev_dev);
1639 ret = add_disk(disk);
 * We have to drop the reference since ->free_disk won't be
 * called in case of add_disk failure.
1645 ublk_put_device(ub);
1648 set_bit(UB_STATE_USED, &ub->state);
1649 ub->dev_info.state = UBLK_S_DEV_LIVE;
1654 mutex_unlock(&ub->mutex);
1658 static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
1659 struct io_uring_cmd *cmd)
1661 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1662 void __user *argp = (void __user *)(unsigned long)header->addr;
1663 cpumask_var_t cpumask;
1664 unsigned long queue;
1665 unsigned int retlen;
1669 if (header->len * BITS_PER_BYTE < nr_cpu_ids)
1671 if (header->len & (sizeof(unsigned long)-1))
1676 queue = header->data[0];
1677 if (queue >= ub->dev_info.nr_hw_queues)
1680 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
1683 for_each_possible_cpu(i) {
1684 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
1685 cpumask_set_cpu(i, cpumask);
1689 retlen = min_t(unsigned short, header->len, cpumask_size());
1690 if (copy_to_user(argp, cpumask, retlen))
1691 goto out_free_cpumask;
1692 if (retlen != header->len &&
1693 clear_user(argp + retlen, header->len - retlen))
1694 goto out_free_cpumask;
1698 free_cpumask_var(cpumask);
1702 static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
1704 pr_devel("%s: dev id %d flags %llx\n", __func__,
1705 info->dev_id, info->flags);
1706 pr_devel("\t nr_hw_queues %d queue_depth %d\n",
1707 info->nr_hw_queues, info->queue_depth);
1710 static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
1712 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1713 void __user *argp = (void __user *)(unsigned long)header->addr;
1714 struct ublksrv_ctrl_dev_info info;
1715 struct ublk_device *ub;
1718 if (header->len < sizeof(info) || !header->addr)
1720 if (header->queue_id != (u16)-1) {
1721 pr_warn("%s: queue_id is wrong %x\n",
1722 __func__, header->queue_id);
1726 if (copy_from_user(&info, argp, sizeof(info)))
1729 if (capable(CAP_SYS_ADMIN))
1730 info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
1731 else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
 * An unprivileged device can't be trusted, and RECOVERY/RECOVERY_REISSUE
 * may still hang error handling, so recovery features can't be supported
 * for unprivileged ublk for now.
 *
 * TODO: provide forward progress for the RECOVERY handler, so that
 * unprivileged devices can benefit from it
1742 if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
1743 info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
1744 UBLK_F_USER_RECOVERY);
/* the created device is always owned by the current user */
1747 ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
1749 if (header->dev_id != info.dev_id) {
1750 pr_warn("%s: dev id not match %u %u\n",
1751 __func__, header->dev_id, info.dev_id);
1755 ublk_dump_dev_info(&info);
1757 ret = mutex_lock_killable(&ublk_ctl_mutex);
1762 if (ublks_added >= ublks_max)
1766 ub = kzalloc(sizeof(*ub), GFP_KERNEL);
1769 mutex_init(&ub->mutex);
1770 spin_lock_init(&ub->mm_lock);
1771 INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
1772 INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
1773 INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
1775 ret = ublk_alloc_dev_number(ub, header->dev_id);
1779 memcpy(&ub->dev_info, &info, sizeof(info));
1781 /* update device id */
1782 ub->dev_info.dev_id = ub->ub_number;
 * 64bit flags will be copied back to userspace as the feature negotiation
 * result, so we have to clear flags the driver doesn't support yet, so that
 * userspace gets the correct set of flags (features) to handle.
1790 ub->dev_info.flags &= UBLK_F_ALL;
1792 if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
1793 ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
1795 ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE;
1797 /* We are not ready to support zero copy */
1798 ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
1800 ub->dev_info.nr_hw_queues = min_t(unsigned int,
1801 ub->dev_info.nr_hw_queues, nr_cpu_ids);
1802 ublk_align_max_io_size(ub);
1804 ret = ublk_init_queues(ub);
1806 goto out_free_dev_number;
1808 ret = ublk_add_tag_set(ub);
1810 goto out_deinit_queues;
1813 if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
1814 goto out_free_tag_set;
 * Add the char dev so that the ublksrv daemon can be set up.
 * ublk_add_chdev() will clean up everything if it fails.
1820 ret = ublk_add_chdev(ub);
1824 blk_mq_free_tag_set(&ub->tag_set);
1826 ublk_deinit_queues(ub);
1827 out_free_dev_number:
1828 ublk_free_dev_number(ub);
1830 mutex_destroy(&ub->mutex);
1833 mutex_unlock(&ublk_ctl_mutex);
1837 static inline bool ublk_idr_freed(int id)
1841 spin_lock(&ublk_idr_lock);
1842 ptr = idr_find(&ublk_index_idr, id);
1843 spin_unlock(&ublk_idr_lock);
1848 static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
1850 struct ublk_device *ub = *p_ub;
1851 int idx = ub->ub_number;
1854 ret = mutex_lock_killable(&ublk_ctl_mutex);
1858 if (!test_bit(UB_STATE_DELETED, &ub->state)) {
1860 set_bit(UB_STATE_DELETED, &ub->state);
1863 /* Mark the reference as consumed */
1865 ublk_put_device(ub);
1866 mutex_unlock(&ublk_ctl_mutex);
 * Wait until the idr is removed, then it can be reused after the
 * DEL_DEV command has returned.
 *
 * If we return because of a user interrupt, a future delete command may
 * still come:
 *
 * - the device number isn't freed: this device won't or needn't be
 *   deleted again, since UB_STATE_DELETED is set, and the device
 *   will be released after the last reference is dropped
 *
 * - the device number is freed already: we will not find this
 *   device via ublk_get_device_from_id()
1882 wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));
1887 static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
1889 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1891 pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
1892 __func__, cmd->cmd_op, header->dev_id, header->queue_id,
1893 header->data[0], header->addr, header->len);
1896 static int ublk_ctrl_stop_dev(struct ublk_device *ub)
1899 cancel_work_sync(&ub->stop_work);
1900 cancel_work_sync(&ub->quiesce_work);
1905 static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
1906 struct io_uring_cmd *cmd)
1908 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1909 void __user *argp = (void __user *)(unsigned long)header->addr;
1911 if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
1914 if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
/* TYPE_DEVT is read-only, so fill it in before returning to userspace */
1921 static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
1923 ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
1924 ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
1927 ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
1928 ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
1930 ub->params.devt.disk_major = 0;
1931 ub->params.devt.disk_minor = 0;
1933 ub->params.types |= UBLK_PARAM_TYPE_DEVT;
1936 static int ublk_ctrl_get_params(struct ublk_device *ub,
1937 struct io_uring_cmd *cmd)
1939 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1940 void __user *argp = (void __user *)(unsigned long)header->addr;
1941 struct ublk_params_header ph;
1944 if (header->len <= sizeof(ph) || !header->addr)
1947 if (copy_from_user(&ph, argp, sizeof(ph)))
1950 if (ph.len > header->len || !ph.len)
1953 if (ph.len > sizeof(struct ublk_params))
1954 ph.len = sizeof(struct ublk_params);
1956 mutex_lock(&ub->mutex);
1957 ublk_ctrl_fill_params_devt(ub);
1958 if (copy_to_user(argp, &ub->params, ph.len))
1962 mutex_unlock(&ub->mutex);
1967 static int ublk_ctrl_set_params(struct ublk_device *ub,
1968 struct io_uring_cmd *cmd)
1970 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1971 void __user *argp = (void __user *)(unsigned long)header->addr;
1972 struct ublk_params_header ph;
1975 if (header->len <= sizeof(ph) || !header->addr)
1978 if (copy_from_user(&ph, argp, sizeof(ph)))
1981 if (ph.len > header->len || !ph.len || !ph.types)
1984 if (ph.len > sizeof(struct ublk_params))
1985 ph.len = sizeof(struct ublk_params);
1987 /* parameters can only be changed when device isn't live */
1988 mutex_lock(&ub->mutex);
1989 if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
1991 } else if (copy_from_user(&ub->params, argp, ph.len)) {
1994 /* clear all we don't support yet */
1995 ub->params.types &= UBLK_PARAM_TYPE_ALL;
1996 ret = ublk_validate_params(ub);
1998 mutex_unlock(&ub->mutex);
2003 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
2007 WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
2008 /* All old ioucmds have to be completed */
2009 WARN_ON_ONCE(ubq->nr_io_ready);
2010 /* old daemon is PF_EXITING, put it now */
2011 put_task_struct(ubq->ubq_daemon);
2012 /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
2013 ubq->ubq_daemon = NULL;
2014 ubq->timeout = false;
2016 for (i = 0; i < ubq->q_depth; i++) {
2017 struct ublk_io *io = &ubq->ios[i];
2019 /* forget everything now and be ready for new FETCH_REQ */
2026 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
2027 struct io_uring_cmd *cmd)
2029 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
2033 mutex_lock(&ub->mutex);
2034 if (!ublk_can_use_recovery(ub))
 * START_RECOVERY is only allowed after:
 *
 * (1) UB_STATE_OPEN is not set, which means the dying process has exited
 *     and the related io_uring ctx is freed, so the file struct of
 *     /dev/ublkcX is released.
 *
 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
 *     (a) has quiesced the request queue
 *     (b) has requeued every inflight rq whose io_flags is ACTIVE
 *     (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
 *     (d) has completed/canceled all ioucmds owned by the dying process
2049 if (test_bit(UB_STATE_OPEN, &ub->state) ||
2050 ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2054 pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
2055 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
2056 ublk_queue_reinit(ub, ublk_get_queue(ub, i));
2057 /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
2059 ub->nr_queues_ready = 0;
2060 ub->nr_privileged_daemon = 0;
2061 init_completion(&ub->completion);
2064 mutex_unlock(&ub->mutex);
2068 static int ublk_ctrl_end_recovery(struct ublk_device *ub,
2069 struct io_uring_cmd *cmd)
2071 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
2072 int ublksrv_pid = (int)header->data[0];
pr_devel("%s: Waiting for new ubq_daemons (nr: %d) to be ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
/* wait until the new ubq_daemons have sent all their FETCH_REQ commands */
2078 wait_for_completion_interruptible(&ub->completion);
2079 pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
2080 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
2082 mutex_lock(&ub->mutex);
2083 if (!ublk_can_use_recovery(ub))
2086 if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2090 ub->dev_info.ublksrv_pid = ublksrv_pid;
2091 pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
2092 __func__, ublksrv_pid, header->dev_id);
2093 blk_mq_unquiesce_queue(ub->ub_disk->queue);
2094 pr_devel("%s: queue unquiesced, dev id %d.\n",
2095 __func__, header->dev_id);
2096 blk_mq_kick_requeue_list(ub->ub_disk->queue);
2097 ub->dev_info.state = UBLK_S_DEV_LIVE;
2098 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
2101 mutex_unlock(&ub->mutex);
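/*
 * Together, START_USER_RECOVERY and END_USER_RECOVERY above implement the
 * recovery handshake: START reinitializes every queue once the old daemon has
 * exited and /dev/ublkcX is closed, the new daemon re-sends FETCH_REQ for
 * every tag, and END unquiesces the queue and marks the device LIVE again.
 */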
2106 * All control commands are sent via /dev/ublk-control, so we have to check
2107 * the destination device's permission
2109 static int ublk_char_dev_permission(struct ublk_device *ub,
2110 const char *dev_path, int mask)
2116 err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
2120 err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
2125 if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
2128 err = inode_permission(&nop_mnt_idmap,
2129 d_backing_inode(path.dentry), mask);
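/*
 * For UBLK_F_UNPRIVILEGED_DEV, control commands carry the /dev/ublkcN path in
 * their payload (header->dev_path_len bytes at header->addr); the caller's
 * permission on that char device, checked by ublk_char_dev_permission(),
 * decides whether the command is allowed.
 */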
2135 static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
2136 struct io_uring_cmd *cmd)
2138 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
2139 bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
2140 void __user *argp = (void __user *)(unsigned long)header->addr;
2141 char *dev_path = NULL;
2145 if (!unprivileged) {
2146 if (!capable(CAP_SYS_ADMIN))
 * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
 * char_dev_path in its payload too, since userspace may not
 * know whether the specified device was created in unprivileged mode.
2154 if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
 * The user has to provide the char device path for an unprivileged ublk
 * device.
 *
 * header->addr always points to the dev path buffer, and
 * header->dev_path_len records the length of the dev path buffer.
2164 if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
2167 if (header->len < header->dev_path_len)
2170 dev_path = kmalloc(header->dev_path_len + 1, GFP_KERNEL);
2175 if (copy_from_user(dev_path, argp, header->dev_path_len))
2177 dev_path[header->dev_path_len] = 0;
2180 switch (_IOC_NR(cmd->cmd_op)) {
2181 case UBLK_CMD_GET_DEV_INFO:
2182 case UBLK_CMD_GET_DEV_INFO2:
2183 case UBLK_CMD_GET_QUEUE_AFFINITY:
2184 case UBLK_CMD_GET_PARAMS:
2187 case UBLK_CMD_START_DEV:
2188 case UBLK_CMD_STOP_DEV:
2189 case UBLK_CMD_ADD_DEV:
2190 case UBLK_CMD_DEL_DEV:
2191 case UBLK_CMD_SET_PARAMS:
2192 case UBLK_CMD_START_USER_RECOVERY:
2193 case UBLK_CMD_END_USER_RECOVERY:
2194 mask = MAY_READ | MAY_WRITE;
2200 ret = ublk_char_dev_permission(ub, dev_path, mask);
2202 header->len -= header->dev_path_len;
2203 header->addr += header->dev_path_len;
2205 pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
2206 __func__, ub->ub_number, cmd->cmd_op,
2207 ub->dev_info.owner_uid, ub->dev_info.owner_gid,
2214 static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
2215 unsigned int issue_flags)
2217 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
2218 struct ublk_device *ub = NULL;
2219 u32 cmd_op = cmd->cmd_op;
2222 if (issue_flags & IO_URING_F_NONBLOCK)
2225 ublk_ctrl_cmd_dump(cmd);
2227 if (!(issue_flags & IO_URING_F_SQE128))
2230 ret = ublk_check_cmd_op(cmd_op);
2234 if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
2236 ub = ublk_get_device_from_id(header->dev_id);
2240 ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
2245 switch (_IOC_NR(cmd_op)) {
2246 case UBLK_CMD_START_DEV:
2247 ret = ublk_ctrl_start_dev(ub, cmd);
2249 case UBLK_CMD_STOP_DEV:
2250 ret = ublk_ctrl_stop_dev(ub);
2252 case UBLK_CMD_GET_DEV_INFO:
2253 case UBLK_CMD_GET_DEV_INFO2:
2254 ret = ublk_ctrl_get_dev_info(ub, cmd);
2256 case UBLK_CMD_ADD_DEV:
2257 ret = ublk_ctrl_add_dev(cmd);
2259 case UBLK_CMD_DEL_DEV:
2260 ret = ublk_ctrl_del_dev(&ub);
2262 case UBLK_CMD_GET_QUEUE_AFFINITY:
2263 ret = ublk_ctrl_get_queue_affinity(ub, cmd);
2265 case UBLK_CMD_GET_PARAMS:
2266 ret = ublk_ctrl_get_params(ub, cmd);
2268 case UBLK_CMD_SET_PARAMS:
2269 ret = ublk_ctrl_set_params(ub, cmd);
2271 case UBLK_CMD_START_USER_RECOVERY:
2272 ret = ublk_ctrl_start_recovery(ub, cmd);
2274 case UBLK_CMD_END_USER_RECOVERY:
2275 ret = ublk_ctrl_end_recovery(ub, cmd);
2284 ublk_put_device(ub);
2286 io_uring_cmd_done(cmd, ret, 0);
2287 pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
2288 __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
2289 return -EIOCBQUEUED;
2292 static const struct file_operations ublk_ctl_fops = {
2293 .open = nonseekable_open,
2294 .uring_cmd = ublk_ctrl_uring_cmd,
2295 .owner = THIS_MODULE,
2296 .llseek = noop_llseek,
2299 static struct miscdevice ublk_misc = {
2300 .minor = MISC_DYNAMIC_MINOR,
2301 .name = "ublk-control",
2302 .fops = &ublk_ctl_fops,
2305 static int __init ublk_init(void)
2309 init_waitqueue_head(&ublk_idr_wq);
2311 ret = misc_register(&ublk_misc);
2315 ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
2317 goto unregister_mis;
2319 ublk_chr_class = class_create(THIS_MODULE, "ublk-char");
2320 if (IS_ERR(ublk_chr_class)) {
2321 ret = PTR_ERR(ublk_chr_class);
2322 goto free_chrdev_region;
2327 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
2329 misc_deregister(&ublk_misc);
2333 static void __exit ublk_exit(void)
2335 struct ublk_device *ub;
2338 idr_for_each_entry(&ublk_index_idr, ub, id)
2341 class_destroy(ublk_chr_class);
2342 misc_deregister(&ublk_misc);
2344 idr_destroy(&ublk_index_idr);
2345 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
2348 module_init(ublk_init);
2349 module_exit(ublk_exit);
2351 module_param(ublks_max, int, 0444);
MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add (default: 64)");
2354 MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
2355 MODULE_LICENSE("GPL");