// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Userspace block device - block device whose IO is handled from userspace
 *
 * Take full advantage of io_uring passthrough commands for communicating
 * with the ublk userspace daemon (ublksrvd) for handling basic IO requests.
 *
 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
 *
 * (part of code stolen from loop.c)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/io_uring.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/task_work.h>
#include <linux/namei.h>
#include <linux/kref.h>
#include <uapi/linux/ublk_cmd.h>
#define UBLK_MINORS		(1U << MINORBITS)

/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
		| UBLK_F_URING_CMD_COMP_IN_TASK \
		| UBLK_F_NEED_GET_DATA \
		| UBLK_F_USER_RECOVERY \
		| UBLK_F_USER_RECOVERY_REISSUE \
		| UBLK_F_UNPRIVILEGED_DEV \
		| UBLK_F_CMD_IOCTL_ENCODE \
		| UBLK_F_USER_COPY)

/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
		UBLK_PARAM_TYPE_DISCARD | UBLK_PARAM_TYPE_DEVT)
struct ublk_rq_data {
	struct llist_node node;

	struct kref ref;
};
struct ublk_uring_cmd_pdu {
	struct ublk_queue *ubq;
};
/*
 * io command is active: sqe cmd is received, and its cqe isn't done
 *
 * If the flag is set, the io command is owned by ublk driver, and waits
 * for incoming blk-mq request from the ublk block device.
 *
 * If the flag is cleared, the io command will be completed, and owned by
 * ublk server.
 */
#define UBLK_IO_FLAG_ACTIVE	0x01

/*
 * IO command is completed via cqe, and it is being handled by ublksrv, and
 * not committed yet.
 *
 * Basically exclusive with UBLK_IO_FLAG_ACTIVE, so the same flags field can
 * be reused for both.
 */
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02

/*
 * IO command is aborted, so this flag is set in case of
 * !UBLK_IO_FLAG_ACTIVE.
 *
 * After this flag is observed, any pending or new incoming request
 * associated with this io command will be failed immediately.
 */
#define UBLK_IO_FLAG_ABORTED 0x04

/*
 * UBLK_IO_FLAG_NEED_GET_DATA is set because the IO command requires the
 * data buffer address to be fetched from ublksrv.
 *
 * Then, bio data can be copied into this data buffer for a WRITE request
 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
 */
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
struct ublk_io {
	/* userspace buffer address from io cmd */
	__u64	addr;
	unsigned int flags;
	int res;

	struct io_uring_cmd *cmd;
};

struct ublk_queue {
	int q_id;
	int q_depth;

	unsigned long flags;
	struct task_struct	*ubq_daemon;
	char *io_cmd_buf;

	struct llist_head	io_cmds;

	unsigned long io_addr;	/* mapped vm address */
	unsigned int max_io_sz;
	bool force_abort;
	bool timeout;
	unsigned short nr_io_ready;	/* how many ios setup */
	struct ublk_device *dev;
	struct ublk_io ios[];
};
141 #define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ)
struct ublk_device {
	struct gendisk		*ub_disk;

	char	*__queues;

	unsigned int	queue_size;
	struct ublksrv_ctrl_dev_info	dev_info;

	struct blk_mq_tag_set	tag_set;

	struct cdev		cdev;
	struct device		cdev_dev;

#define UB_STATE_OPEN		0
#define UB_STATE_USED		1
#define UB_STATE_DELETED	2
	unsigned long		state;
	int			ub_number;

	struct mutex		mutex;

	spinlock_t		mm_lock;
	struct mm_struct	*mm;

	struct ublk_params	params;

	struct completion	completion;
	unsigned int		nr_queues_ready;
	unsigned int		nr_privileged_daemon;

	/*
	 * Our ubq->daemon may be killed without any notification, so
	 * monitor each queue's daemon periodically
	 */
	struct delayed_work	monitor_work;
	struct work_struct	quiesce_work;
	struct work_struct	stop_work;
};

/* header of ublk_params */
struct ublk_params_header {
	__u32	len;
	__u32	types;
};
188 static inline void __ublk_complete_rq(struct request *req);
189 static void ublk_complete_rq(struct kref *ref);
191 static dev_t ublk_chr_devt;
192 static struct class *ublk_chr_class;
194 static DEFINE_IDR(ublk_index_idr);
195 static DEFINE_SPINLOCK(ublk_idr_lock);
196 static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
198 static DEFINE_MUTEX(ublk_ctl_mutex);
/*
 * Max ublk devices allowed to add
 *
 * It can be extended to one per-user limit in future or even controlled
 * by cgroup.
 */
static unsigned int ublks_max = 64;
207 static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
209 static struct miscdevice ublk_misc;
static inline unsigned ublk_pos_to_hwq(loff_t pos)
{
	return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
		UBLK_QID_BITS_MASK;
}

static inline unsigned ublk_pos_to_buf_off(loff_t pos)
{
	return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
}

static inline unsigned ublk_pos_to_tag(loff_t pos)
{
	return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
		UBLK_TAG_BITS_MASK;
}
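
/*
 * Note (illustrative sketch, mirroring the decode helpers above): with
 * UBLK_F_USER_COPY the server addresses one request's data buffer by encoding
 * (queue id, tag, in-buffer offset) into the pread()/pwrite() file offset,
 * roughly:
 *
 *	pos = UBLKSRV_IO_BUF_OFFSET +
 *		(((__u64)q_id << UBLK_QID_OFF) |
 *		 ((__u64)tag << UBLK_TAG_OFF) | buf_off);
 *
 * The shift and mask macros come from <uapi/linux/ublk_cmd.h>.
 */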
228 static void ublk_dev_param_basic_apply(struct ublk_device *ub)
230 struct request_queue *q = ub->ub_disk->queue;
231 const struct ublk_param_basic *p = &ub->params.basic;
233 blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
234 blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
235 blk_queue_io_min(q, 1 << p->io_min_shift);
236 blk_queue_io_opt(q, 1 << p->io_opt_shift);
238 blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
239 p->attrs & UBLK_ATTR_FUA);
	if (p->attrs & UBLK_ATTR_ROTATIONAL)
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
245 blk_queue_max_hw_sectors(q, p->max_sectors);
246 blk_queue_chunk_sectors(q, p->chunk_sectors);
247 blk_queue_virt_boundary(q, p->virt_boundary_mask);
249 if (p->attrs & UBLK_ATTR_READ_ONLY)
250 set_disk_ro(ub->ub_disk, true);
252 set_capacity(ub->ub_disk, p->dev_sectors);
255 static void ublk_dev_param_discard_apply(struct ublk_device *ub)
257 struct request_queue *q = ub->ub_disk->queue;
258 const struct ublk_param_discard *p = &ub->params.discard;
260 q->limits.discard_alignment = p->discard_alignment;
261 q->limits.discard_granularity = p->discard_granularity;
262 blk_queue_max_discard_sectors(q, p->max_discard_sectors);
263 blk_queue_max_write_zeroes_sectors(q,
264 p->max_write_zeroes_sectors);
265 blk_queue_max_discard_segments(q, p->max_discard_segments);
static int ublk_validate_params(const struct ublk_device *ub)
{
	/* basic param is the only one which must be set */
	if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
		const struct ublk_param_basic *p = &ub->params.basic;

		if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
			return -EINVAL;

		if (p->logical_bs_shift > p->physical_bs_shift)
			return -EINVAL;

		if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
			return -EINVAL;
	} else
		return -EINVAL;

	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
		const struct ublk_param_discard *p = &ub->params.discard;

		/* So far, only single segment discard is supported */
		if (p->max_discard_sectors && p->max_discard_segments != 1)
			return -EINVAL;

		if (!p->discard_granularity)
			return -EINVAL;
	}

	/* dev_t is read-only */
	if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
		return -EINVAL;

	return 0;
}
static int ublk_apply_params(struct ublk_device *ub)
{
	if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
		return -EINVAL;

	ublk_dev_param_basic_apply(ub);

	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
		ublk_dev_param_discard_apply(ub);

	return 0;
}
316 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
318 return ubq->flags & UBLK_F_USER_COPY;
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
	/*
	 * read()/write() is involved in user copy, so a request reference
	 * has to be grabbed
	 */
	return ublk_support_user_copy(ubq);
}
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
		struct request *req)
{
	if (ublk_need_req_ref(ubq)) {
		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

		kref_init(&data->ref);
	}
}

static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
		struct request *req)
{
	if (ublk_need_req_ref(ubq)) {
		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

		return kref_get_unless_zero(&data->ref);
	}

	return true;
}

static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
		struct request *req)
{
	if (ublk_need_req_ref(ubq)) {
		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

		kref_put(&data->ref, ublk_complete_rq);
	} else {
		__ublk_complete_rq(req);
	}
}
364 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
366 return ubq->flags & UBLK_F_NEED_GET_DATA;
369 static struct ublk_device *ublk_get_device(struct ublk_device *ub)
371 if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
376 static void ublk_put_device(struct ublk_device *ub)
378 put_device(&ub->cdev_dev);
381 static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
384 return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
387 static inline bool ublk_rq_has_data(const struct request *rq)
389 return bio_has_data(rq->bio);
392 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
395 return (struct ublksrv_io_desc *)
396 &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
399 static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
401 return ublk_get_queue(ub, q_id)->io_cmd_buf;
404 static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
406 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
408 return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
412 static inline bool ublk_queue_can_use_recovery_reissue(
413 struct ublk_queue *ubq)
415 return (ubq->flags & UBLK_F_USER_RECOVERY) &&
416 (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
419 static inline bool ublk_queue_can_use_recovery(
420 struct ublk_queue *ubq)
422 return ubq->flags & UBLK_F_USER_RECOVERY;
425 static inline bool ublk_can_use_recovery(struct ublk_device *ub)
427 return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
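
/*
 * Note: as implemented in __ublk_fail_req() below, the two recovery flags
 * differ in how an aborted request is finished: with
 * UBLK_F_USER_RECOVERY_REISSUE the request is requeued so a new daemon can
 * re-handle it after recovery, while plain UBLK_F_USER_RECOVERY lets the
 * aborted request complete with an error.
 */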
430 static void ublk_free_disk(struct gendisk *disk)
432 struct ublk_device *ub = disk->private_data;
434 clear_bit(UB_STATE_USED, &ub->state);
435 put_device(&ub->cdev_dev);
438 static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
439 unsigned int *owner_gid)
444 current_uid_gid(&uid, &gid);
446 *owner_uid = from_kuid(&init_user_ns, uid);
447 *owner_gid = from_kgid(&init_user_ns, gid);
450 static int ublk_open(struct gendisk *disk, blk_mode_t mode)
452 struct ublk_device *ub = disk->private_data;
454 if (capable(CAP_SYS_ADMIN))
458 * If it is one unprivileged device, only owner can open
459 * the disk. Otherwise it could be one trap made by one
460 * evil user who grants this disk's privileges to other
461 * users deliberately.
463 * This way is reasonable too given anyone can create
464 * unprivileged device, and no need other's grant.
466 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
467 unsigned int curr_uid, curr_gid;
469 ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
471 if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
472 ub->dev_info.owner_gid)
479 static const struct block_device_operations ub_fops = {
480 .owner = THIS_MODULE,
482 .free_disk = ublk_free_disk,
485 #define UBLK_MAX_PIN_PAGES 32
487 struct ublk_io_iter {
488 struct page *pages[UBLK_MAX_PIN_PAGES];
490 struct bvec_iter iter;
/* copy 'total' bytes between the pinned pages and the request's bio data */
static void ublk_copy_io_pages(struct ublk_io_iter *data,
		size_t total, size_t pg_off, int dir)
{
	unsigned done = 0;
	unsigned pg_idx = 0;

	while (done < total) {
		struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
		unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
				(unsigned)(PAGE_SIZE - pg_off));
		void *bv_buf = bvec_kmap_local(&bv);
		void *pg_buf = kmap_local_page(data->pages[pg_idx]);

		if (dir == ITER_DEST)
			memcpy(pg_buf + pg_off, bv_buf, bytes);
		else
			memcpy(bv_buf, pg_buf + pg_off, bytes);

		kunmap_local(pg_buf);
		kunmap_local(bv_buf);

		/* advance page array */
		pg_off += bytes;
		if (pg_off == PAGE_SIZE) {
			pg_idx += 1;
			pg_off = 0;
		}

		done += bytes;

		/* advance bio */
		bio_advance_iter_single(data->bio, &data->iter, bytes);
		if (!data->iter.bi_size) {
			data->bio = data->bio->bi_next;
			if (data->bio == NULL)
				break;

			data->iter = data->bio->bi_iter;
		}
	}
}
static bool ublk_advance_io_iter(const struct request *req,
		struct ublk_io_iter *iter, unsigned int offset)
{
	struct bio *bio = req->bio;

	for_each_bio(bio) {
		if (bio->bi_iter.bi_size > offset) {
			iter->bio = bio;
			iter->iter = bio->bi_iter;
			bio_advance_iter(iter->bio, &iter->iter, offset);
			return true;
		}
		offset -= bio->bi_iter.bi_size;
	}
	return false;
}
553 * Copy data between request pages and io_iter, and 'offset'
554 * is the start point of linear offset of request.
556 static size_t ublk_copy_user_pages(const struct request *req,
557 unsigned offset, struct iov_iter *uiter, int dir)
559 struct ublk_io_iter iter;
562 if (!ublk_advance_io_iter(req, &iter, offset))
565 while (iov_iter_count(uiter) && iter.bio) {
571 len = iov_iter_get_pages2(uiter, iter.pages,
572 iov_iter_count(uiter),
573 UBLK_MAX_PIN_PAGES, &off);
577 ublk_copy_io_pages(&iter, len, off, dir);
578 nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
579 for (i = 0; i < nr_pages; i++) {
580 if (dir == ITER_DEST)
581 set_page_dirty(iter.pages[i]);
582 put_page(iter.pages[i]);
590 static inline bool ublk_need_map_req(const struct request *req)
592 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
595 static inline bool ublk_need_unmap_req(const struct request *req)
597 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_READ;
600 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
603 const unsigned int rq_bytes = blk_rq_bytes(req);
605 if (ublk_support_user_copy(ubq))
	/*
	 * no zero copy: we delay copying WRITE request data into the ublksrv
	 * context, and the big benefit is that pinning pages in the current
	 * context is pretty fast, see ublk_copy_user_pages()
	 */
613 if (ublk_need_map_req(req)) {
614 struct iov_iter iter;
616 const int dir = ITER_DEST;
618 import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
621 return ublk_copy_user_pages(req, 0, &iter, dir);
626 static int ublk_unmap_io(const struct ublk_queue *ubq,
627 const struct request *req,
630 const unsigned int rq_bytes = blk_rq_bytes(req);
632 if (ublk_support_user_copy(ubq))
635 if (ublk_need_unmap_req(req)) {
636 struct iov_iter iter;
638 const int dir = ITER_SOURCE;
640 WARN_ON_ONCE(io->res > rq_bytes);
642 import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
644 return ublk_copy_user_pages(req, 0, &iter, dir);
649 static inline unsigned int ublk_req_build_flags(struct request *req)
653 if (req->cmd_flags & REQ_FAILFAST_DEV)
654 flags |= UBLK_IO_F_FAILFAST_DEV;
656 if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
657 flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
659 if (req->cmd_flags & REQ_FAILFAST_DRIVER)
660 flags |= UBLK_IO_F_FAILFAST_DRIVER;
662 if (req->cmd_flags & REQ_META)
663 flags |= UBLK_IO_F_META;
665 if (req->cmd_flags & REQ_FUA)
666 flags |= UBLK_IO_F_FUA;
668 if (req->cmd_flags & REQ_NOUNMAP)
669 flags |= UBLK_IO_F_NOUNMAP;
671 if (req->cmd_flags & REQ_SWAP)
672 flags |= UBLK_IO_F_SWAP;
677 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
679 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
680 struct ublk_io *io = &ubq->ios[req->tag];
683 switch (req_op(req)) {
685 ublk_op = UBLK_IO_OP_READ;
688 ublk_op = UBLK_IO_OP_WRITE;
691 ublk_op = UBLK_IO_OP_FLUSH;
694 ublk_op = UBLK_IO_OP_DISCARD;
696 case REQ_OP_WRITE_ZEROES:
697 ublk_op = UBLK_IO_OP_WRITE_ZEROES;
700 return BLK_STS_IOERR;
703 /* need to translate since kernel may change */
704 iod->op_flags = ublk_op | ublk_req_build_flags(req);
705 iod->nr_sectors = blk_rq_sectors(req);
706 iod->start_sector = blk_rq_pos(req);
707 iod->addr = io->addr;
712 static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
713 struct io_uring_cmd *ioucmd)
715 return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
718 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
720 return ubq->ubq_daemon->flags & PF_EXITING;
723 /* todo: handle partial completion */
/* todo: handle partial completion */
static inline void __ublk_complete_rq(struct request *req)
{
	struct ublk_queue *ubq = req->mq_hctx->driver_data;
	struct ublk_io *io = &ubq->ios[req->tag];
	unsigned int unmapped_bytes;
	blk_status_t res = BLK_STS_OK;

	/* called from ublk_abort_queue() code path */
	if (io->flags & UBLK_IO_FLAG_ABORTED) {
		res = BLK_STS_IOERR;
		goto exit;
	}

	/* fail a READ IO if nothing was read */
	if (!io->res && req_op(req) == REQ_OP_READ)
		io->res = -EIO;

	if (io->res < 0) {
		res = errno_to_blk_status(io->res);
		goto exit;
	}

	/*
	 * FLUSH, DISCARD and WRITE_ZEROES don't transfer payload bytes,
	 * so end them directly; neither needs unmapping.
	 */
	if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE)
		goto exit;

	/* for a READ request, copy the data at iod->addr back into the rq buffers */
	unmapped_bytes = ublk_unmap_io(ubq, req, io);

	/*
	 * Extremely unlikely since the data was filled in just before;
	 * re-read io->res simply for this case.
	 */
	if (unlikely(unmapped_bytes < io->res))
		io->res = unmapped_bytes;

	if (blk_update_request(req, BLK_STS_OK, io->res))
		blk_mq_requeue_request(req, true);
	else
		__blk_mq_end_request(req, BLK_STS_OK);

	return;
exit:
	blk_mq_end_request(req, res);
}
776 static void ublk_complete_rq(struct kref *ref)
778 struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
780 struct request *req = blk_mq_rq_from_pdu(data);
782 __ublk_complete_rq(req);
/*
 * Since __ublk_rq_task_work always fails requests immediately during
 * exiting, __ublk_fail_req() is only called from abort context during
 * exiting. So lock is unnecessary.
 *
 * Also aborting may not be started yet, keep in mind that one failed
 * request may be issued by the block layer again.
 */
793 static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
796 WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
798 if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
799 io->flags |= UBLK_IO_FLAG_ABORTED;
800 if (ublk_queue_can_use_recovery_reissue(ubq))
801 blk_mq_requeue_request(req, false);
803 ublk_put_req_ref(ubq, req);
807 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
808 unsigned issue_flags)
810 /* mark this cmd owned by ublksrv */
811 io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
814 * clear ACTIVE since we are done with this sqe/cmd slot
815 * We can only accept io cmd in case of being not active.
817 io->flags &= ~UBLK_IO_FLAG_ACTIVE;
819 /* tell ublksrv one io request is coming */
820 io_uring_cmd_done(io->cmd, res, 0, issue_flags);
823 #define UBLK_REQUEUE_DELAY_MS 3
825 static inline void __ublk_abort_rq(struct ublk_queue *ubq,
828 /* We cannot process this rq so just requeue it. */
829 if (ublk_queue_can_use_recovery(ubq))
830 blk_mq_requeue_request(rq, false);
832 blk_mq_end_request(rq, BLK_STS_IOERR);
834 mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
837 static inline void __ublk_rq_task_work(struct request *req,
838 unsigned issue_flags)
840 struct ublk_queue *ubq = req->mq_hctx->driver_data;
842 struct ublk_io *io = &ubq->ios[tag];
843 unsigned int mapped_bytes;
845 pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
846 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
847 ublk_get_iod(ubq, req->tag)->addr);
850 * Task is exiting if either:
852 * (1) current != ubq_daemon.
853 * io_uring_cmd_complete_in_task() tries to run task_work
854 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
856 * (2) current->flags & PF_EXITING.
858 if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
859 __ublk_abort_rq(ubq, req);
863 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
		/*
		 * We have not handled the UBLK_IO_NEED_GET_DATA command yet,
		 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
		 * and notify it.
		 */
869 if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
870 io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
871 pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
872 __func__, io->cmd->cmd_op, ubq->q_id,
873 req->tag, io->flags);
874 ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
878 * We have handled UBLK_IO_NEED_GET_DATA command,
879 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
882 io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
883 /* update iod->addr because ublksrv may have passed a new io buffer */
884 ublk_get_iod(ubq, req->tag)->addr = io->addr;
885 pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
886 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
887 ublk_get_iod(ubq, req->tag)->addr);
890 mapped_bytes = ublk_map_io(ubq, req, io);
892 /* partially mapped, update io descriptor */
893 if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
895 * Nothing mapped, retry until we succeed.
897 * We may never succeed in mapping any bytes here because
898 * of OOM. TODO: reserve one buffer with single page pinned
899 * for providing forward progress guarantee.
901 if (unlikely(!mapped_bytes)) {
902 blk_mq_requeue_request(req, false);
903 blk_mq_delay_kick_requeue_list(req->q,
904 UBLK_REQUEUE_DELAY_MS);
908 ublk_get_iod(ubq, req->tag)->nr_sectors =
912 ublk_init_req_ref(ubq, req);
913 ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
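
/*
 * Note: __ublk_rq_task_work() runs in the ubq daemon's task context. For
 * WRITE requests it copies the bio data into the daemon's buffer (unless
 * user copy or NEED_GET_DATA defers that), then completes the pending
 * FETCH/COMMIT_AND_FETCH uring_cmd with UBLK_IO_RES_OK so the server learns
 * about the new request.
 */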
916 static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
917 unsigned issue_flags)
919 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
920 struct ublk_rq_data *data, *tmp;
922 io_cmds = llist_reverse_order(io_cmds);
923 llist_for_each_entry_safe(data, tmp, io_cmds, node)
924 __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
927 static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
929 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
930 struct ublk_rq_data *data, *tmp;
932 llist_for_each_entry_safe(data, tmp, io_cmds, node)
933 __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
936 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
938 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
939 struct ublk_queue *ubq = pdu->ubq;
941 ublk_forward_io_cmds(ubq, issue_flags);
944 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
946 struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
949 if (!llist_add(&data->node, &ubq->io_cmds))
952 io = &ubq->ios[rq->tag];
	/*
	 * If the check passes, we know that this is a re-issued request aborted
	 * previously in monitor_work because the ubq_daemon (the cmd's task) is
	 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
	 * because this ioucmd's io_uring context may be freed now if no inflight
	 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
	 *
	 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request
	 * (releasing the tag). Then the request is re-started (allocating the tag)
	 * and we are here. Since releasing/allocating a tag implies smp_mb(),
	 * finding UBLK_IO_FLAG_ABORTED guarantees that this is a re-issued
	 * request aborted previously.
	 */
965 if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
966 ublk_abort_io_cmds(ubq);
968 struct io_uring_cmd *cmd = io->cmd;
969 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
972 io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
976 static enum blk_eh_timer_return ublk_timeout(struct request *rq)
978 struct ublk_queue *ubq = rq->mq_hctx->driver_data;
980 if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
982 send_sig(SIGKILL, ubq->ubq_daemon, 0);
989 return BLK_EH_RESET_TIMER;
992 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
993 const struct blk_mq_queue_data *bd)
995 struct ublk_queue *ubq = hctx->driver_data;
996 struct request *rq = bd->rq;
999 /* fill iod to slot in io cmd buffer */
1000 res = ublk_setup_iod(ubq, rq);
1001 if (unlikely(res != BLK_STS_OK))
1002 return BLK_STS_IOERR;
	/* With the recovery feature enabled, force_abort is set in
	 * ublk_stop_dev() before calling del_gendisk(). We have to
	 * abort all requeued and new rqs here to let del_gendisk()
	 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
	 * here, to avoid UAF on the io_uring ctx.
	 *
	 * Note: force_abort is guaranteed to be seen because it is set
	 * before the request queue is unquiesced.
	 */
1013 if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
1014 return BLK_STS_IOERR;
1016 blk_mq_start_request(bd->rq);
1018 if (unlikely(ubq_daemon_is_dying(ubq))) {
1019 __ublk_abort_rq(ubq, rq);
1023 ublk_queue_cmd(ubq, rq);
1028 static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1029 unsigned int hctx_idx)
1031 struct ublk_device *ub = driver_data;
1032 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
1034 hctx->driver_data = ubq;
1038 static const struct blk_mq_ops ublk_mq_ops = {
1039 .queue_rq = ublk_queue_rq,
1040 .init_hctx = ublk_init_hctx,
1041 .timeout = ublk_timeout,
1044 static int ublk_ch_open(struct inode *inode, struct file *filp)
1046 struct ublk_device *ub = container_of(inode->i_cdev,
1047 struct ublk_device, cdev);
1049 if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
1051 filp->private_data = ub;
1055 static int ublk_ch_release(struct inode *inode, struct file *filp)
1057 struct ublk_device *ub = filp->private_data;
1059 clear_bit(UB_STATE_OPEN, &ub->state);
1063 /* map pre-allocated per-queue cmd buffer to ublksrv daemon */
1064 static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
1066 struct ublk_device *ub = filp->private_data;
1067 size_t sz = vma->vm_end - vma->vm_start;
1068 unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
1069 unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
1072 spin_lock(&ub->mm_lock);
1074 ub->mm = current->mm;
1075 if (current->mm != ub->mm)
1077 spin_unlock(&ub->mm_lock);
1082 if (vma->vm_flags & VM_WRITE)
1085 end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
1086 if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
1089 q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
1090 pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
1091 __func__, q_id, current->pid, vma->vm_start,
1092 phys_off, (unsigned long)sz);
1094 if (sz != ublk_queue_cmd_buf_size(ub, q_id))
1097 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
1098 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
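
/*
 * Userspace-side sketch (for illustration only, following the checks above;
 * ublkc_fd, q_id and depth are placeholder names): the daemon maps queue
 * q_id's descriptor array read-only with something like
 *
 *	off_t off = UBLKSRV_CMD_BUF_OFFSET + (off_t)q_id *
 *		UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
 *	struct ublksrv_io_desc *iods =
 *		mmap(NULL, depth * sizeof(*iods), PROT_READ, MAP_SHARED,
 *		     ublkc_fd, off);
 *
 * and then reads iods[tag] for each incoming io command.
 */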
1101 static void ublk_commit_completion(struct ublk_device *ub,
1102 const struct ublksrv_io_cmd *ub_cmd)
1104 u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
1105 struct ublk_queue *ubq = ublk_get_queue(ub, qid);
1106 struct ublk_io *io = &ubq->ios[tag];
1107 struct request *req;
	/* now this cmd slot is owned by the ublk driver */
1110 io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
1111 io->res = ub_cmd->result;
1113 /* find the io request and complete */
1114 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
1116 if (req && likely(!blk_should_fake_timeout(req->q)))
1117 ublk_put_req_ref(ubq, req);
/*
 * When ->ubq_daemon is exiting, either new requests are ended immediately,
 * or any queued io commands are drained, so it is safe to abort the queue
 * locklessly.
 */
1125 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
1129 if (!ublk_get_device(ub))
1132 for (i = 0; i < ubq->q_depth; i++) {
1133 struct ublk_io *io = &ubq->ios[i];
1135 if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
1139 * Either we fail the request or ublk_rq_task_work_fn
1142 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1144 __ublk_fail_req(ubq, io, rq);
1147 ublk_put_device(ub);
1150 static void ublk_daemon_monitor_work(struct work_struct *work)
1152 struct ublk_device *ub =
1153 container_of(work, struct ublk_device, monitor_work.work);
1156 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
1157 struct ublk_queue *ubq = ublk_get_queue(ub, i);
1159 if (ubq_daemon_is_dying(ubq)) {
1160 if (ublk_queue_can_use_recovery(ubq))
1161 schedule_work(&ub->quiesce_work);
1163 schedule_work(&ub->stop_work);
1165 /* abort queue is for making forward progress */
1166 ublk_abort_queue(ub, ubq);
	/*
	 * We can't schedule monitor work once ub's state is no longer
	 * UBLK_S_DEV_LIVE, i.e. after ublk_remove() or __ublk_quiesce_dev()
	 * has started.
	 *
	 * No need for ub->mutex: monitor work is canceled after the state is
	 * marked as not LIVE, so the new state is observed reliably.
	 */
1177 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1178 schedule_delayed_work(&ub->monitor_work,
1179 UBLK_DAEMON_MONITOR_PERIOD);
1182 static inline bool ublk_queue_ready(struct ublk_queue *ubq)
1184 return ubq->nr_io_ready == ubq->q_depth;
1187 static void ublk_cancel_queue(struct ublk_queue *ubq)
1191 if (!ublk_queue_ready(ubq))
1194 for (i = 0; i < ubq->q_depth; i++) {
1195 struct ublk_io *io = &ubq->ios[i];
1197 if (io->flags & UBLK_IO_FLAG_ACTIVE)
1198 io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
1199 IO_URING_F_UNLOCKED);
1202 /* all io commands are canceled */
1203 ubq->nr_io_ready = 0;
1206 /* Cancel all pending commands, must be called after del_gendisk() returns */
1207 static void ublk_cancel_dev(struct ublk_device *ub)
1211 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1212 ublk_cancel_queue(ublk_get_queue(ub, i));
1215 static bool ublk_check_inflight_rq(struct request *rq, void *data)
1219 if (blk_mq_request_started(rq)) {
1226 static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
1230 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
1233 blk_mq_tagset_busy_iter(&ub->tag_set,
1234 ublk_check_inflight_rq, &idle);
1237 msleep(UBLK_REQUEUE_DELAY_MS);
1241 static void __ublk_quiesce_dev(struct ublk_device *ub)
1243 pr_devel("%s: quiesce ub: dev_id %d state %s\n",
1244 __func__, ub->dev_info.dev_id,
1245 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1246 "LIVE" : "QUIESCED");
1247 blk_mq_quiesce_queue(ub->ub_disk->queue);
1248 ublk_wait_tagset_rqs_idle(ub);
1249 ub->dev_info.state = UBLK_S_DEV_QUIESCED;
1250 ublk_cancel_dev(ub);
	/* we are going to release the task_struct of ubq_daemon and reset
	 * ->ubq_daemon to NULL. So in monitor_work, a check on ubq_daemon would cause UAF.
	 * Besides, monitor_work is not necessary in QUIESCED state since we have
	 * already scheduled quiesce_work and quiesced all ubqs.
	 *
	 * Do not let monitor_work schedule itself if the state is QUIESCED. We cancel
	 * it here and re-schedule it in END_USER_RECOVERY to avoid UAF.
	 */
1259 cancel_delayed_work_sync(&ub->monitor_work);
1262 static void ublk_quiesce_work_fn(struct work_struct *work)
1264 struct ublk_device *ub =
1265 container_of(work, struct ublk_device, quiesce_work);
1267 mutex_lock(&ub->mutex);
1268 if (ub->dev_info.state != UBLK_S_DEV_LIVE)
1270 __ublk_quiesce_dev(ub);
1272 mutex_unlock(&ub->mutex);
1275 static void ublk_unquiesce_dev(struct ublk_device *ub)
1279 pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
1280 __func__, ub->dev_info.dev_id,
1281 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1282 "LIVE" : "QUIESCED");
	/* quiesce_work has run. We let requeued rqs be aborted
	 * before running fallback_wq. "force_abort" must be seen
	 * after the request queue is unquiesced. Then del_gendisk()
	 * can move on.
	 */
1288 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1289 ublk_get_queue(ub, i)->force_abort = true;
1291 blk_mq_unquiesce_queue(ub->ub_disk->queue);
1292 /* We may have requeued some rqs in ublk_quiesce_queue() */
1293 blk_mq_kick_requeue_list(ub->ub_disk->queue);
1296 static void ublk_stop_dev(struct ublk_device *ub)
1298 mutex_lock(&ub->mutex);
1299 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1301 if (ublk_can_use_recovery(ub)) {
1302 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1303 __ublk_quiesce_dev(ub);
1304 ublk_unquiesce_dev(ub);
1306 del_gendisk(ub->ub_disk);
1307 ub->dev_info.state = UBLK_S_DEV_DEAD;
1308 ub->dev_info.ublksrv_pid = -1;
1309 put_disk(ub->ub_disk);
1312 ublk_cancel_dev(ub);
1313 mutex_unlock(&ub->mutex);
1314 cancel_delayed_work_sync(&ub->monitor_work);
1317 /* device can only be started after all IOs are ready */
1318 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1320 mutex_lock(&ub->mutex);
1322 if (ublk_queue_ready(ubq)) {
1323 ubq->ubq_daemon = current;
1324 get_task_struct(ubq->ubq_daemon);
1325 ub->nr_queues_ready++;
1327 if (capable(CAP_SYS_ADMIN))
1328 ub->nr_privileged_daemon++;
1330 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
1331 complete_all(&ub->completion);
1332 mutex_unlock(&ub->mutex);
1335 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
1338 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1339 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1341 ublk_queue_cmd(ubq, req);
static inline int ublk_check_cmd_op(u32 cmd_op)
{
	u32 ioc_type = _IOC_TYPE(cmd_op);

	if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
		return -EOPNOTSUPP;

	if (ioc_type != 'u' && ioc_type != 0)
		return -EOPNOTSUPP;

	return 0;
}
1357 static inline void ublk_fill_io_cmd(struct ublk_io *io,
1358 struct io_uring_cmd *cmd, unsigned long buf_addr)
1361 io->flags |= UBLK_IO_FLAG_ACTIVE;
1362 io->addr = buf_addr;
1365 static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
1366 unsigned int issue_flags,
1367 const struct ublksrv_io_cmd *ub_cmd)
1369 struct ublk_device *ub = cmd->file->private_data;
1370 struct ublk_queue *ubq;
1372 u32 cmd_op = cmd->cmd_op;
1373 unsigned tag = ub_cmd->tag;
1375 struct request *req;
1377 pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
1378 __func__, cmd->cmd_op, ub_cmd->q_id, tag,
1381 if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
1384 ubq = ublk_get_queue(ub, ub_cmd->q_id);
1385 if (!ubq || ub_cmd->q_id != ubq->q_id)
1388 if (ubq->ubq_daemon && ubq->ubq_daemon != current)
1391 if (tag >= ubq->q_depth)
1394 io = &ubq->ios[tag];
1396 /* there is pending io cmd, something must be wrong */
1397 if (io->flags & UBLK_IO_FLAG_ACTIVE) {
	/*
	 * ensure that the user issues UBLK_IO_NEED_GET_DATA
	 * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
	 */
1406 if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
1407 ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
1410 if (ublk_support_user_copy(ubq) && ub_cmd->addr) {
1415 ret = ublk_check_cmd_op(cmd_op);
1420 switch (_IOC_NR(cmd_op)) {
1421 case UBLK_IO_FETCH_REQ:
1422 /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
1423 if (ublk_queue_ready(ubq)) {
1428 * The io is being handled by server, so COMMIT_RQ is expected
1429 * instead of FETCH_REQ
1431 if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
1434 if (!ublk_support_user_copy(ubq)) {
1436 * FETCH_RQ has to provide IO buffer if NEED GET
1437 * DATA is not enabled
1439 if (!ub_cmd->addr && !ublk_need_get_data(ubq))
1443 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1444 ublk_mark_io_ready(ub, ubq);
1446 case UBLK_IO_COMMIT_AND_FETCH_REQ:
1447 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
1449 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1452 if (!ublk_support_user_copy(ubq)) {
1454 * COMMIT_AND_FETCH_REQ has to provide IO buffer if
1455 * NEED GET DATA is not enabled or it is Read IO.
1457 if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
1458 req_op(req) == REQ_OP_READ))
1461 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1462 ublk_commit_completion(ub, ub_cmd);
1464 case UBLK_IO_NEED_GET_DATA:
1465 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1467 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1468 ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
1473 return -EIOCBQUEUED;
1476 io_uring_cmd_done(cmd, ret, 0, issue_flags);
1477 pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
1478 __func__, cmd_op, tag, ret, io->flags);
1479 return -EIOCBQUEUED;
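
/*
 * Note on the per-io command cycle handled above (a summary, not additional
 * behavior): the server primes each tag with UBLK_IO_FETCH_REQ; the driver
 * holds that uring_cmd until a blk-mq request arrives and then completes it
 * with UBLK_IO_RES_OK. After servicing the IO, the server sends
 * UBLK_IO_COMMIT_AND_FETCH_REQ, which commits the result and re-arms the tag
 * in one round trip. UBLK_IO_NEED_GET_DATA is the optional extra step for
 * WRITE buffers when UBLK_F_NEED_GET_DATA is enabled.
 */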
1482 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
1483 struct ublk_queue *ubq, int tag, size_t offset)
1485 struct request *req;
1487 if (!ublk_need_req_ref(ubq))
1490 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
1494 if (!ublk_get_req_ref(ubq, req))
1497 if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
1500 if (!ublk_rq_has_data(req))
1503 if (offset > blk_rq_bytes(req))
1508 ublk_put_req_ref(ubq, req);
1512 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
1515 * Not necessary for async retry, but let's keep it simple and always
1516 * copy the values to avoid any potential reuse.
1518 const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
1519 const struct ublksrv_io_cmd ub_cmd = {
1520 .q_id = READ_ONCE(ub_src->q_id),
1521 .tag = READ_ONCE(ub_src->tag),
1522 .result = READ_ONCE(ub_src->result),
1523 .addr = READ_ONCE(ub_src->addr)
1526 return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
1529 static inline bool ublk_check_ubuf_dir(const struct request *req,
1532 /* copy ubuf to request pages */
1533 if (req_op(req) == REQ_OP_READ && ubuf_dir == ITER_SOURCE)
1536 /* copy request pages to ubuf */
1537 if (req_op(req) == REQ_OP_WRITE && ubuf_dir == ITER_DEST)
1543 static struct request *ublk_check_and_get_req(struct kiocb *iocb,
1544 struct iov_iter *iter, size_t *off, int dir)
1546 struct ublk_device *ub = iocb->ki_filp->private_data;
1547 struct ublk_queue *ubq;
1548 struct request *req;
1553 return ERR_PTR(-EACCES);
1555 if (!user_backed_iter(iter))
1556 return ERR_PTR(-EACCES);
1558 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1559 return ERR_PTR(-EACCES);
1561 tag = ublk_pos_to_tag(iocb->ki_pos);
1562 q_id = ublk_pos_to_hwq(iocb->ki_pos);
1563 buf_off = ublk_pos_to_buf_off(iocb->ki_pos);
1565 if (q_id >= ub->dev_info.nr_hw_queues)
1566 return ERR_PTR(-EINVAL);
1568 ubq = ublk_get_queue(ub, q_id);
1570 return ERR_PTR(-EINVAL);
1572 if (tag >= ubq->q_depth)
1573 return ERR_PTR(-EINVAL);
1575 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
1577 return ERR_PTR(-EINVAL);
1579 if (!req->mq_hctx || !req->mq_hctx->driver_data)
1582 if (!ublk_check_ubuf_dir(req, dir))
1588 ublk_put_req_ref(ubq, req);
1589 return ERR_PTR(-EACCES);
1592 static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
1594 struct ublk_queue *ubq;
1595 struct request *req;
1599 req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
1601 return PTR_ERR(req);
1603 ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
1604 ubq = req->mq_hctx->driver_data;
1605 ublk_put_req_ref(ubq, req);
1610 static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
1612 struct ublk_queue *ubq;
1613 struct request *req;
1617 req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
1619 return PTR_ERR(req);
1621 ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
1622 ubq = req->mq_hctx->driver_data;
1623 ublk_put_req_ref(ubq, req);
1628 static const struct file_operations ublk_ch_fops = {
1629 .owner = THIS_MODULE,
1630 .open = ublk_ch_open,
1631 .release = ublk_ch_release,
1632 .llseek = no_llseek,
1633 .read_iter = ublk_ch_read_iter,
1634 .write_iter = ublk_ch_write_iter,
1635 .uring_cmd = ublk_ch_uring_cmd,
1636 .mmap = ublk_ch_mmap,
1639 static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
1641 int size = ublk_queue_cmd_buf_size(ub, q_id);
1642 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1644 if (ubq->ubq_daemon)
1645 put_task_struct(ubq->ubq_daemon);
1646 if (ubq->io_cmd_buf)
1647 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
1650 static int ublk_init_queue(struct ublk_device *ub, int q_id)
1652 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1653 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
1657 ubq->flags = ub->dev_info.flags;
1659 ubq->q_depth = ub->dev_info.queue_depth;
1660 size = ublk_queue_cmd_buf_size(ub, q_id);
1662 ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
1666 ubq->io_cmd_buf = ptr;
1671 static void ublk_deinit_queues(struct ublk_device *ub)
1673 int nr_queues = ub->dev_info.nr_hw_queues;
1679 for (i = 0; i < nr_queues; i++)
1680 ublk_deinit_queue(ub, i);
1681 kfree(ub->__queues);
1684 static int ublk_init_queues(struct ublk_device *ub)
1686 int nr_queues = ub->dev_info.nr_hw_queues;
1687 int depth = ub->dev_info.queue_depth;
1688 int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
1689 int i, ret = -ENOMEM;
1691 ub->queue_size = ubq_size;
1692 ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
1696 for (i = 0; i < nr_queues; i++) {
1697 if (ublk_init_queue(ub, i))
1701 init_completion(&ub->completion);
1705 ublk_deinit_queues(ub);
1709 static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
1714 spin_lock(&ublk_idr_lock);
1715 /* allocate id, if @id >= 0, we're requesting that specific id */
1717 err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
1721 err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
1723 spin_unlock(&ublk_idr_lock);
1726 ub->ub_number = err;
1731 static void ublk_free_dev_number(struct ublk_device *ub)
1733 spin_lock(&ublk_idr_lock);
1734 idr_remove(&ublk_index_idr, ub->ub_number);
1735 wake_up_all(&ublk_idr_wq);
1736 spin_unlock(&ublk_idr_lock);
1739 static void ublk_cdev_rel(struct device *dev)
1741 struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
1743 blk_mq_free_tag_set(&ub->tag_set);
1744 ublk_deinit_queues(ub);
1745 ublk_free_dev_number(ub);
1746 mutex_destroy(&ub->mutex);
1750 static int ublk_add_chdev(struct ublk_device *ub)
1752 struct device *dev = &ub->cdev_dev;
1753 int minor = ub->ub_number;
1756 dev->parent = ublk_misc.this_device;
1757 dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
1758 dev->class = ublk_chr_class;
1759 dev->release = ublk_cdev_rel;
1760 device_initialize(dev);
1762 ret = dev_set_name(dev, "ublkc%d", minor);
1766 cdev_init(&ub->cdev, &ublk_ch_fops);
1767 ret = cdev_device_add(&ub->cdev, dev);
1778 static void ublk_stop_work_fn(struct work_struct *work)
1780 struct ublk_device *ub =
1781 container_of(work, struct ublk_device, stop_work);
1786 /* align max io buffer size with PAGE_SIZE */
1787 static void ublk_align_max_io_size(struct ublk_device *ub)
1789 unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
1791 ub->dev_info.max_io_buf_bytes =
1792 round_down(max_io_bytes, PAGE_SIZE);
1795 static int ublk_add_tag_set(struct ublk_device *ub)
1797 ub->tag_set.ops = &ublk_mq_ops;
1798 ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
1799 ub->tag_set.queue_depth = ub->dev_info.queue_depth;
1800 ub->tag_set.numa_node = NUMA_NO_NODE;
1801 ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
1802 ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1803 ub->tag_set.driver_data = ub;
1804 return blk_mq_alloc_tag_set(&ub->tag_set);
1807 static void ublk_remove(struct ublk_device *ub)
1810 cancel_work_sync(&ub->stop_work);
1811 cancel_work_sync(&ub->quiesce_work);
1812 cdev_device_del(&ub->cdev, &ub->cdev_dev);
1813 put_device(&ub->cdev_dev);
1817 static struct ublk_device *ublk_get_device_from_id(int idx)
1819 struct ublk_device *ub = NULL;
1824 spin_lock(&ublk_idr_lock);
1825 ub = idr_find(&ublk_index_idr, idx);
1827 ub = ublk_get_device(ub);
1828 spin_unlock(&ublk_idr_lock);
1833 static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
1835 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
1836 int ublksrv_pid = (int)header->data[0];
1837 struct gendisk *disk;
1840 if (ublksrv_pid <= 0)
1843 wait_for_completion_interruptible(&ub->completion);
1845 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
1847 mutex_lock(&ub->mutex);
1848 if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
1849 test_bit(UB_STATE_USED, &ub->state)) {
1854 disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
1856 ret = PTR_ERR(disk);
1859 sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
1860 disk->fops = &ub_fops;
1861 disk->private_data = ub;
1863 ub->dev_info.ublksrv_pid = ublksrv_pid;
1866 ret = ublk_apply_params(ub);
1870 /* don't probe partitions if any one ubq daemon is un-trusted */
1871 if (ub->nr_privileged_daemon != ub->nr_queues_ready)
1872 set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
1874 get_device(&ub->cdev_dev);
1875 ub->dev_info.state = UBLK_S_DEV_LIVE;
1876 ret = add_disk(disk);
1879 * Has to drop the reference since ->free_disk won't be
1880 * called in case of add_disk failure.
1882 ub->dev_info.state = UBLK_S_DEV_DEAD;
1883 ublk_put_device(ub);
1886 set_bit(UB_STATE_USED, &ub->state);
1891 mutex_unlock(&ub->mutex);
1895 static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
1896 struct io_uring_cmd *cmd)
1898 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
1899 void __user *argp = (void __user *)(unsigned long)header->addr;
1900 cpumask_var_t cpumask;
1901 unsigned long queue;
1902 unsigned int retlen;
1906 if (header->len * BITS_PER_BYTE < nr_cpu_ids)
1908 if (header->len & (sizeof(unsigned long)-1))
1913 queue = header->data[0];
1914 if (queue >= ub->dev_info.nr_hw_queues)
1917 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
1920 for_each_possible_cpu(i) {
1921 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
1922 cpumask_set_cpu(i, cpumask);
1926 retlen = min_t(unsigned short, header->len, cpumask_size());
1927 if (copy_to_user(argp, cpumask, retlen))
1928 goto out_free_cpumask;
1929 if (retlen != header->len &&
1930 clear_user(argp + retlen, header->len - retlen))
1931 goto out_free_cpumask;
1935 free_cpumask_var(cpumask);
1939 static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
1941 pr_devel("%s: dev id %d flags %llx\n", __func__,
1942 info->dev_id, info->flags);
1943 pr_devel("\t nr_hw_queues %d queue_depth %d\n",
1944 info->nr_hw_queues, info->queue_depth);
1947 static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
1949 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
1950 void __user *argp = (void __user *)(unsigned long)header->addr;
1951 struct ublksrv_ctrl_dev_info info;
1952 struct ublk_device *ub;
1955 if (header->len < sizeof(info) || !header->addr)
1957 if (header->queue_id != (u16)-1) {
1958 pr_warn("%s: queue_id is wrong %x\n",
1959 __func__, header->queue_id);
1963 if (copy_from_user(&info, argp, sizeof(info)))
1966 if (capable(CAP_SYS_ADMIN))
1967 info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
1968 else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
	/*
	 * An unprivileged device can't be trusted, but RECOVERY and
	 * RECOVERY_REISSUE may still hang error handling, so recovery
	 * features can't be supported for unprivileged ublk now.
	 *
	 * TODO: provide forward progress for the RECOVERY handler, so that
	 * unprivileged devices can benefit from it.
	 */
1979 if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
1980 info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
1981 UBLK_F_USER_RECOVERY);
1983 /* the created device is always owned by current user */
1984 ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
1986 if (header->dev_id != info.dev_id) {
1987 pr_warn("%s: dev id not match %u %u\n",
1988 __func__, header->dev_id, info.dev_id);
1992 ublk_dump_dev_info(&info);
1994 ret = mutex_lock_killable(&ublk_ctl_mutex);
1999 if (ublks_added >= ublks_max)
2003 ub = kzalloc(sizeof(*ub), GFP_KERNEL);
2006 mutex_init(&ub->mutex);
2007 spin_lock_init(&ub->mm_lock);
2008 INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
2009 INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
2010 INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
2012 ret = ublk_alloc_dev_number(ub, header->dev_id);
2016 memcpy(&ub->dev_info, &info, sizeof(info));
2018 /* update device id */
2019 ub->dev_info.dev_id = ub->ub_number;
2022 * 64bit flags will be copied back to userspace as feature
2023 * negotiation result, so have to clear flags which driver
2024 * doesn't support yet, then userspace can get correct flags
2025 * (features) to handle.
2027 ub->dev_info.flags &= UBLK_F_ALL;
2029 ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
2030 UBLK_F_URING_CMD_COMP_IN_TASK;
2032 /* GET_DATA isn't needed any more with USER_COPY */
2033 if (ub->dev_info.flags & UBLK_F_USER_COPY)
2034 ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
2036 /* We are not ready to support zero copy */
2037 ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
2039 ub->dev_info.nr_hw_queues = min_t(unsigned int,
2040 ub->dev_info.nr_hw_queues, nr_cpu_ids);
2041 ublk_align_max_io_size(ub);
2043 ret = ublk_init_queues(ub);
2045 goto out_free_dev_number;
2047 ret = ublk_add_tag_set(ub);
2049 goto out_deinit_queues;
2052 if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
2053 goto out_free_tag_set;
2056 * Add the char dev so that ublksrv daemon can be setup.
2057 * ublk_add_chdev() will cleanup everything if it fails.
2059 ret = ublk_add_chdev(ub);
2063 blk_mq_free_tag_set(&ub->tag_set);
2065 ublk_deinit_queues(ub);
2066 out_free_dev_number:
2067 ublk_free_dev_number(ub);
2069 mutex_destroy(&ub->mutex);
2072 mutex_unlock(&ublk_ctl_mutex);
2076 static inline bool ublk_idr_freed(int id)
2080 spin_lock(&ublk_idr_lock);
2081 ptr = idr_find(&ublk_index_idr, id);
2082 spin_unlock(&ublk_idr_lock);
2087 static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
2089 struct ublk_device *ub = *p_ub;
2090 int idx = ub->ub_number;
2093 ret = mutex_lock_killable(&ublk_ctl_mutex);
2097 if (!test_bit(UB_STATE_DELETED, &ub->state)) {
2099 set_bit(UB_STATE_DELETED, &ub->state);
2102 /* Mark the reference as consumed */
2104 ublk_put_device(ub);
2105 mutex_unlock(&ublk_ctl_mutex);
	/*
	 * Wait until the idr entry is removed, then it can be reused after
	 * the DEL_DEV command has returned.
	 *
	 * If we return because of a user interrupt, a future delete command
	 * may come:
	 *
	 * - the device number isn't freed: this device won't or needn't
	 *   be deleted again, since UB_STATE_DELETED is set, and the device
	 *   will be released after the last reference is dropped
	 *
	 * - the device number is freed already: we will not find this
	 *   device via ublk_get_device_from_id()
	 */
2121 wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));
2126 static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
2128 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2130 pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
2131 __func__, cmd->cmd_op, header->dev_id, header->queue_id,
2132 header->data[0], header->addr, header->len);
2135 static int ublk_ctrl_stop_dev(struct ublk_device *ub)
2138 cancel_work_sync(&ub->stop_work);
2139 cancel_work_sync(&ub->quiesce_work);
2144 static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
2145 struct io_uring_cmd *cmd)
2147 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2148 void __user *argp = (void __user *)(unsigned long)header->addr;
2150 if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
2153 if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
2159 /* TYPE_DEVT is readonly, so fill it up before returning to userspace */
2160 static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
2162 ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
2163 ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
2166 ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
2167 ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
2169 ub->params.devt.disk_major = 0;
2170 ub->params.devt.disk_minor = 0;
2172 ub->params.types |= UBLK_PARAM_TYPE_DEVT;
2175 static int ublk_ctrl_get_params(struct ublk_device *ub,
2176 struct io_uring_cmd *cmd)
2178 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2179 void __user *argp = (void __user *)(unsigned long)header->addr;
2180 struct ublk_params_header ph;
2183 if (header->len <= sizeof(ph) || !header->addr)
2186 if (copy_from_user(&ph, argp, sizeof(ph)))
2189 if (ph.len > header->len || !ph.len)
2192 if (ph.len > sizeof(struct ublk_params))
2193 ph.len = sizeof(struct ublk_params);
2195 mutex_lock(&ub->mutex);
2196 ublk_ctrl_fill_params_devt(ub);
2197 if (copy_to_user(argp, &ub->params, ph.len))
2201 mutex_unlock(&ub->mutex);
2206 static int ublk_ctrl_set_params(struct ublk_device *ub,
2207 struct io_uring_cmd *cmd)
2209 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2210 void __user *argp = (void __user *)(unsigned long)header->addr;
2211 struct ublk_params_header ph;
2214 if (header->len <= sizeof(ph) || !header->addr)
2217 if (copy_from_user(&ph, argp, sizeof(ph)))
2220 if (ph.len > header->len || !ph.len || !ph.types)
2223 if (ph.len > sizeof(struct ublk_params))
2224 ph.len = sizeof(struct ublk_params);
2226 /* parameters can only be changed when device isn't live */
2227 mutex_lock(&ub->mutex);
2228 if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
2230 } else if (copy_from_user(&ub->params, argp, ph.len)) {
2233 /* clear all we don't support yet */
2234 ub->params.types &= UBLK_PARAM_TYPE_ALL;
2235 ret = ublk_validate_params(ub);
2237 ub->params.types = 0;
2239 mutex_unlock(&ub->mutex);
2244 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
2248 WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
2249 /* All old ioucmds have to be completed */
2250 WARN_ON_ONCE(ubq->nr_io_ready);
2251 /* old daemon is PF_EXITING, put it now */
2252 put_task_struct(ubq->ubq_daemon);
2253 /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
2254 ubq->ubq_daemon = NULL;
2255 ubq->timeout = false;
2257 for (i = 0; i < ubq->q_depth; i++) {
2258 struct ublk_io *io = &ubq->ios[i];
2260 /* forget everything now and be ready for new FETCH_REQ */
2267 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
2268 struct io_uring_cmd *cmd)
2270 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2274 mutex_lock(&ub->mutex);
2275 if (!ublk_can_use_recovery(ub))
	/*
	 * START_RECOVERY is only allowed after:
	 *
	 * (1) UB_STATE_OPEN is not set, which means the dying process has exited
	 *     and the related io_uring ctx is freed, so the file struct of
	 *     /dev/ublkcX is released.
	 *
	 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
	 *     (a) has quiesced the request queue
	 *     (b) has requeued every inflight rq whose io_flags is ACTIVE
	 *     (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
	 *     (d) has completed/canceled all ioucmds owned by the dying process
	 */
2290 if (test_bit(UB_STATE_OPEN, &ub->state) ||
2291 ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2295 pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
2296 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
2297 ublk_queue_reinit(ub, ublk_get_queue(ub, i));
	/* set ub->mm to NULL, otherwise the new ubq_daemon cannot mmap the io_cmd_buf */
	ub->mm = NULL;
2300 ub->nr_queues_ready = 0;
2301 ub->nr_privileged_daemon = 0;
2302 init_completion(&ub->completion);
2305 mutex_unlock(&ub->mutex);
2309 static int ublk_ctrl_end_recovery(struct ublk_device *ub,
2310 struct io_uring_cmd *cmd)
2312 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2313 int ublksrv_pid = (int)header->data[0];
2316 pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
2317 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
2318 /* wait until new ubq_daemon sending all FETCH_REQ */
2319 wait_for_completion_interruptible(&ub->completion);
2320 pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
2321 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
2323 mutex_lock(&ub->mutex);
2324 if (!ublk_can_use_recovery(ub))
2327 if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2331 ub->dev_info.ublksrv_pid = ublksrv_pid;
2332 pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
2333 __func__, ublksrv_pid, header->dev_id);
2334 blk_mq_unquiesce_queue(ub->ub_disk->queue);
2335 pr_devel("%s: queue unquiesced, dev id %d.\n",
2336 __func__, header->dev_id);
2337 blk_mq_kick_requeue_list(ub->ub_disk->queue);
2338 ub->dev_info.state = UBLK_S_DEV_LIVE;
2339 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
2342 mutex_unlock(&ub->mutex);
2346 static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
2348 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2349 void __user *argp = (void __user *)(unsigned long)header->addr;
2350 u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;
2352 if (header->len != UBLK_FEATURES_LEN || !header->addr)
2355 if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
/*
 * All control commands are sent via /dev/ublk-control, so we have to check
 * the destination device's permission
 */
2365 static int ublk_char_dev_permission(struct ublk_device *ub,
2366 const char *dev_path, int mask)
2372 err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
2376 err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
2381 if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
2384 err = inode_permission(&nop_mnt_idmap,
2385 d_backing_inode(path.dentry), mask);
2391 static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
2392 struct io_uring_cmd *cmd)
2394 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
2395 bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
2396 void __user *argp = (void __user *)(unsigned long)header->addr;
2397 char *dev_path = NULL;
2401 if (!unprivileged) {
2402 if (!capable(CAP_SYS_ADMIN))
		/*
		 * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
		 * char_dev_path in its payload too, since userspace may not
		 * know if the specified device was created in unprivileged
		 * mode.
		 */
		if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
			return 0;
	/*
	 * User has to provide the char device path for unprivileged ublk
	 *
	 * header->addr always points to the dev path buffer, and
	 * header->dev_path_len records the length of the dev path buffer.
	 */
2420 if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
2423 if (header->len < header->dev_path_len)
2426 dev_path = kmalloc(header->dev_path_len + 1, GFP_KERNEL);
2431 if (copy_from_user(dev_path, argp, header->dev_path_len))
2433 dev_path[header->dev_path_len] = 0;
2436 switch (_IOC_NR(cmd->cmd_op)) {
2437 case UBLK_CMD_GET_DEV_INFO:
2438 case UBLK_CMD_GET_DEV_INFO2:
2439 case UBLK_CMD_GET_QUEUE_AFFINITY:
2440 case UBLK_CMD_GET_PARAMS:
2441 case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
2444 case UBLK_CMD_START_DEV:
2445 case UBLK_CMD_STOP_DEV:
2446 case UBLK_CMD_ADD_DEV:
2447 case UBLK_CMD_DEL_DEV:
2448 case UBLK_CMD_SET_PARAMS:
2449 case UBLK_CMD_START_USER_RECOVERY:
2450 case UBLK_CMD_END_USER_RECOVERY:
2451 mask = MAY_READ | MAY_WRITE;
2457 ret = ublk_char_dev_permission(ub, dev_path, mask);
2459 header->len -= header->dev_path_len;
2460 header->addr += header->dev_path_len;
2462 pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
2463 __func__, ub->ub_number, cmd->cmd_op,
2464 ub->dev_info.owner_uid, ub->dev_info.owner_gid,
2471 static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
2472 unsigned int issue_flags)
2474 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2475 struct ublk_device *ub = NULL;
2476 u32 cmd_op = cmd->cmd_op;
2479 if (issue_flags & IO_URING_F_NONBLOCK)
2482 ublk_ctrl_cmd_dump(cmd);
2484 if (!(issue_flags & IO_URING_F_SQE128))
2487 ret = ublk_check_cmd_op(cmd_op);
2491 if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
2492 ret = ublk_ctrl_get_features(cmd);
2496 if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
2498 ub = ublk_get_device_from_id(header->dev_id);
2502 ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
2507 switch (_IOC_NR(cmd_op)) {
2508 case UBLK_CMD_START_DEV:
2509 ret = ublk_ctrl_start_dev(ub, cmd);
2511 case UBLK_CMD_STOP_DEV:
2512 ret = ublk_ctrl_stop_dev(ub);
2514 case UBLK_CMD_GET_DEV_INFO:
2515 case UBLK_CMD_GET_DEV_INFO2:
2516 ret = ublk_ctrl_get_dev_info(ub, cmd);
2518 case UBLK_CMD_ADD_DEV:
2519 ret = ublk_ctrl_add_dev(cmd);
2521 case UBLK_CMD_DEL_DEV:
2522 ret = ublk_ctrl_del_dev(&ub);
2524 case UBLK_CMD_GET_QUEUE_AFFINITY:
2525 ret = ublk_ctrl_get_queue_affinity(ub, cmd);
2527 case UBLK_CMD_GET_PARAMS:
2528 ret = ublk_ctrl_get_params(ub, cmd);
2530 case UBLK_CMD_SET_PARAMS:
2531 ret = ublk_ctrl_set_params(ub, cmd);
2533 case UBLK_CMD_START_USER_RECOVERY:
2534 ret = ublk_ctrl_start_recovery(ub, cmd);
2536 case UBLK_CMD_END_USER_RECOVERY:
2537 ret = ublk_ctrl_end_recovery(ub, cmd);
2546 ublk_put_device(ub);
2548 io_uring_cmd_done(cmd, ret, 0, issue_flags);
2549 pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
2550 __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
2551 return -EIOCBQUEUED;
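
/*
 * Informal overview of the control-device flow (a summary of the handlers
 * above, not additional behavior): a ublk server typically issues, via
 * uring_cmd on /dev/ublk-control, ADD_DEV and SET_PARAMS, then primes every
 * (queue, tag) with FETCH_REQ on the /dev/ublkcN char device, and finally
 * START_DEV once all io commands are ready; teardown goes through STOP_DEV
 * and DEL_DEV, and the recovery path uses START_USER_RECOVERY /
 * END_USER_RECOVERY.
 */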
2554 static const struct file_operations ublk_ctl_fops = {
2555 .open = nonseekable_open,
2556 .uring_cmd = ublk_ctrl_uring_cmd,
2557 .owner = THIS_MODULE,
2558 .llseek = noop_llseek,
2561 static struct miscdevice ublk_misc = {
2562 .minor = MISC_DYNAMIC_MINOR,
2563 .name = "ublk-control",
2564 .fops = &ublk_ctl_fops,
2567 static int __init ublk_init(void)
2571 BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
2572 UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
2574 init_waitqueue_head(&ublk_idr_wq);
2576 ret = misc_register(&ublk_misc);
2580 ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
2582 goto unregister_mis;
2584 ublk_chr_class = class_create("ublk-char");
2585 if (IS_ERR(ublk_chr_class)) {
2586 ret = PTR_ERR(ublk_chr_class);
2587 goto free_chrdev_region;
2592 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
2594 misc_deregister(&ublk_misc);
2598 static void __exit ublk_exit(void)
2600 struct ublk_device *ub;
2603 idr_for_each_entry(&ublk_index_idr, ub, id)
2606 class_destroy(ublk_chr_class);
2607 misc_deregister(&ublk_misc);
2609 idr_destroy(&ublk_index_idr);
2610 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
2613 module_init(ublk_init);
2614 module_exit(ublk_exit);
2616 module_param(ublks_max, int, 0444);
2617 MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)");
2619 MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
2620 MODULE_LICENSE("GPL");