1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Userspace block device - block device whose IO is handled in userspace
5 * Makes full use of the io_uring passthrough command for communicating with
6 * the ublk userspace daemon (ublksrvd) for handling basic IO requests.
8 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
10 * (part of code stolen from loop.c)
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/sched.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/stat.h>
19 #include <linux/errno.h>
20 #include <linux/major.h>
21 #include <linux/wait.h>
22 #include <linux/blkdev.h>
23 #include <linux/init.h>
24 #include <linux/swap.h>
25 #include <linux/slab.h>
26 #include <linux/compat.h>
27 #include <linux/mutex.h>
28 #include <linux/writeback.h>
29 #include <linux/completion.h>
30 #include <linux/highmem.h>
31 #include <linux/sysfs.h>
32 #include <linux/miscdevice.h>
33 #include <linux/falloc.h>
34 #include <linux/uio.h>
35 #include <linux/ioprio.h>
36 #include <linux/sched/mm.h>
37 #include <linux/uaccess.h>
38 #include <linux/cdev.h>
39 #include <linux/io_uring.h>
40 #include <linux/blk-mq.h>
41 #include <linux/delay.h>
44 #include <linux/task_work.h>
45 #include <linux/namei.h>
46 #include <linux/kref.h>
47 #include <uapi/linux/ublk_cmd.h>
49 #define UBLK_MINORS (1U << MINORBITS)
51 /* All UBLK_F_* have to be included into UBLK_F_ALL */
52 #define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
53 | UBLK_F_URING_CMD_COMP_IN_TASK \
54 | UBLK_F_NEED_GET_DATA \
55 | UBLK_F_USER_RECOVERY \
56 | UBLK_F_USER_RECOVERY_REISSUE \
57 | UBLK_F_UNPRIVILEGED_DEV \
58 | UBLK_F_CMD_IOCTL_ENCODE \
62 /* All UBLK_PARAM_TYPE_* should be included here */
63 #define UBLK_PARAM_TYPE_ALL \
64 (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
65 UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
68 struct llist_node node;
76 struct ublk_uring_cmd_pdu {
77 struct ublk_queue *ubq;
81 * io command is active: sqe cmd is received, and its cqe isn't done
83 * If the flag is set, the io command is owned by the ublk driver and is
84 * waiting for an incoming blk-mq request from the ublk block device.
86 * If the flag is cleared, the io command will be completed, and owned by
89 #define UBLK_IO_FLAG_ACTIVE 0x01
92 * IO command is completed via cqe, and it is being handled by ublksrv, and
95 * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be served for
98 #define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
101 * IO command is aborted, so this flag is set in case of
102 * !UBLK_IO_FLAG_ACTIVE.
104 * After this flag is observed, any pending or new incoming request
105 * associated with this io command will be failed immediately
107 #define UBLK_IO_FLAG_ABORTED 0x04
110 * UBLK_IO_FLAG_NEED_GET_DATA is set because the IO command requires
111 * getting the data buffer address from ublksrv.
113 * Then, bio data could be copied into this data buffer for a WRITE request
114 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
116 #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
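/*
 * Illustrative summary, not taken from the original source: a command slot
 * typically cycles through
 *
 *      FETCH_REQ issued            -> ACTIVE set
 *      blk-mq request dispatched   -> cqe posted, ACTIVE cleared,
 *                                     OWNED_BY_SRV set
 *      COMMIT_AND_FETCH_REQ issued -> OWNED_BY_SRV cleared, ACTIVE set again
 *
 * with NEED_GET_DATA inserted before the WRITE data copy when the
 * UBLK_F_NEED_GET_DATA feature has been negotiated.
 */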
119 /* userspace buffer address from io cmd */
124 struct io_uring_cmd *cmd;
132 struct task_struct *ubq_daemon;
135 struct llist_head io_cmds;
137 unsigned long io_addr; /* mapped vm address */
138 unsigned int max_io_sz;
141 unsigned short nr_io_ready; /* how many ios setup */
142 struct ublk_device *dev;
143 struct ublk_io ios[];
146 #define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ)
149 struct gendisk *ub_disk;
153 unsigned int queue_size;
154 struct ublksrv_ctrl_dev_info dev_info;
156 struct blk_mq_tag_set tag_set;
159 struct device cdev_dev;
161 #define UB_STATE_OPEN 0
162 #define UB_STATE_USED 1
163 #define UB_STATE_DELETED 2
170 struct mm_struct *mm;
172 struct ublk_params params;
174 struct completion completion;
175 unsigned int nr_queues_ready;
176 unsigned int nr_privileged_daemon;
179 * Our ubq->daemon may be killed without any notification, so
180 * monitor each queue's daemon periodically
182 struct delayed_work monitor_work;
183 struct work_struct quiesce_work;
184 struct work_struct stop_work;
187 /* header of ublk_params */
188 struct ublk_params_header {
193 static inline unsigned int ublk_req_build_flags(struct request *req);
194 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
197 static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
199 return ub->dev_info.flags & UBLK_F_USER_COPY;
202 static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
204 return ub->dev_info.flags & UBLK_F_ZONED;
207 static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
209 return ubq->flags & UBLK_F_ZONED;
212 #ifdef CONFIG_BLK_DEV_ZONED
214 static int ublk_get_nr_zones(const struct ublk_device *ub)
216 const struct ublk_param_basic *p = &ub->params.basic;
218 /* Zone size is a power of 2 */
219 return p->dev_sectors >> ilog2(p->chunk_sectors);
222 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
224 return blk_revalidate_disk_zones(ub->ub_disk, NULL);
227 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
229 const struct ublk_param_zoned *p = &ub->params.zoned;
232 if (!ublk_dev_is_zoned(ub))
235 if (!p->max_zone_append_sectors)
238 nr_zones = ublk_get_nr_zones(ub);
240 if (p->max_active_zones > nr_zones)
243 if (p->max_open_zones > nr_zones)
249 static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
251 const struct ublk_param_zoned *p = &ub->params.zoned;
253 disk_set_zoned(ub->ub_disk, BLK_ZONED_HM);
254 blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
255 blk_queue_required_elevator_features(ub->ub_disk->queue,
256 ELEVATOR_F_ZBD_SEQ_WRITE);
257 disk_set_max_active_zones(ub->ub_disk, p->max_active_zones);
258 disk_set_max_open_zones(ub->ub_disk, p->max_open_zones);
259 blk_queue_max_zone_append_sectors(ub->ub_disk->queue, p->max_zone_append_sectors);
261 ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
266 /* Based on virtblk_alloc_report_buffer */
267 static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
268 unsigned int nr_zones, size_t *buflen)
270 struct request_queue *q = ublk->ub_disk->queue;
274 nr_zones = min_t(unsigned int, nr_zones,
275 ublk->ub_disk->nr_zones);
277 bufsize = nr_zones * sizeof(struct blk_zone);
279 min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT);
281 while (bufsize >= sizeof(struct blk_zone)) {
282 buf = kvmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
294 static int ublk_report_zones(struct gendisk *disk, sector_t sector,
295 unsigned int nr_zones, report_zones_cb cb, void *data)
297 struct ublk_device *ub = disk->private_data;
298 unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
299 unsigned int first_zone = sector >> ilog2(zone_size_sectors);
300 unsigned int done_zones = 0;
301 unsigned int max_zones_per_request;
303 struct blk_zone *buffer;
304 size_t buffer_length;
306 nr_zones = min_t(unsigned int, ub->ub_disk->nr_zones - first_zone,
309 buffer = ublk_alloc_report_buffer(ub, nr_zones, &buffer_length);
313 max_zones_per_request = buffer_length / sizeof(struct blk_zone);
315 while (done_zones < nr_zones) {
316 unsigned int remaining_zones = nr_zones - done_zones;
317 unsigned int zones_in_request =
318 min_t(unsigned int, remaining_zones, max_zones_per_request);
320 struct ublk_rq_data *pdu;
323 memset(buffer, 0, buffer_length);
325 req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
331 pdu = blk_mq_rq_to_pdu(req);
332 pdu->operation = UBLK_IO_OP_REPORT_ZONES;
333 pdu->sector = sector;
334 pdu->nr_zones = zones_in_request;
336 ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
339 blk_mq_free_request(req);
343 status = blk_execute_rq(req, 0);
344 ret = blk_status_to_errno(status);
345 blk_mq_free_request(req);
349 for (unsigned int i = 0; i < zones_in_request; i++) {
350 struct blk_zone *zone = buffer + i;
352 /* A zero length zone means no more zones in this response */
356 ret = cb(zone, i, data);
361 sector += zone_size_sectors;
373 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
376 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
377 struct ublk_io *io = &ubq->ios[req->tag];
378 struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req);
381 switch (req_op(req)) {
382 case REQ_OP_ZONE_OPEN:
383 ublk_op = UBLK_IO_OP_ZONE_OPEN;
385 case REQ_OP_ZONE_CLOSE:
386 ublk_op = UBLK_IO_OP_ZONE_CLOSE;
388 case REQ_OP_ZONE_FINISH:
389 ublk_op = UBLK_IO_OP_ZONE_FINISH;
391 case REQ_OP_ZONE_RESET:
392 ublk_op = UBLK_IO_OP_ZONE_RESET;
394 case REQ_OP_ZONE_APPEND:
395 ublk_op = UBLK_IO_OP_ZONE_APPEND;
397 case REQ_OP_ZONE_RESET_ALL:
398 ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
401 ublk_op = pdu->operation;
403 case UBLK_IO_OP_REPORT_ZONES:
404 iod->op_flags = ublk_op | ublk_req_build_flags(req);
405 iod->nr_zones = pdu->nr_zones;
406 iod->start_sector = pdu->sector;
409 return BLK_STS_IOERR;
412 /* We do not support drv_out */
413 return BLK_STS_NOTSUPP;
415 return BLK_STS_IOERR;
418 iod->op_flags = ublk_op | ublk_req_build_flags(req);
419 iod->nr_sectors = blk_rq_sectors(req);
420 iod->start_sector = blk_rq_pos(req);
421 iod->addr = io->addr;
428 #define ublk_report_zones (NULL)
430 static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
435 static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
440 static int ublk_revalidate_disk_zones(struct ublk_device *ub)
445 static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
448 return BLK_STS_NOTSUPP;
453 static inline void __ublk_complete_rq(struct request *req);
454 static void ublk_complete_rq(struct kref *ref);
456 static dev_t ublk_chr_devt;
457 static const struct class ublk_chr_class = {
461 static DEFINE_IDR(ublk_index_idr);
462 static DEFINE_SPINLOCK(ublk_idr_lock);
463 static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
465 static DEFINE_MUTEX(ublk_ctl_mutex);
468 * Max number of ublk devices allowed to be added
470 * It can be extended to a per-user limit in the future, or even controlled
473 #define UBLK_MAX_UBLKS UBLK_MINORS
474 static unsigned int ublks_max = 64;
475 static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
477 static struct miscdevice ublk_misc;
479 static inline unsigned ublk_pos_to_hwq(loff_t pos)
481 return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
485 static inline unsigned ublk_pos_to_buf_off(loff_t pos)
487 return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
490 static inline unsigned ublk_pos_to_tag(loff_t pos)
492 return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
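/*
 * Illustrative note, not from the original source: with UBLK_F_USER_COPY the
 * daemon addresses per-IO data on the char device at an offset that encodes
 * queue id, tag and buffer offset, roughly (assuming the UBLK_QID_OFF /
 * UBLK_TAG_OFF / UBLK_IO_BUF_BITS_MASK layout from ublk_cmd.h)
 *
 *      pos = UBLKSRV_IO_BUF_OFFSET +
 *              (((__u64)q_id << UBLK_QID_OFF) |
 *               ((__u64)tag << UBLK_TAG_OFF) | buf_off);
 *
 * The helpers above decode such a position back into hwq, tag and buffer
 * offset.
 */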
496 static void ublk_dev_param_basic_apply(struct ublk_device *ub)
498 struct request_queue *q = ub->ub_disk->queue;
499 const struct ublk_param_basic *p = &ub->params.basic;
501 blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
502 blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
503 blk_queue_io_min(q, 1 << p->io_min_shift);
504 blk_queue_io_opt(q, 1 << p->io_opt_shift);
506 blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
507 p->attrs & UBLK_ATTR_FUA);
508 if (p->attrs & UBLK_ATTR_ROTATIONAL)
509 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
511 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
513 blk_queue_max_hw_sectors(q, p->max_sectors);
514 blk_queue_chunk_sectors(q, p->chunk_sectors);
515 blk_queue_virt_boundary(q, p->virt_boundary_mask);
517 if (p->attrs & UBLK_ATTR_READ_ONLY)
518 set_disk_ro(ub->ub_disk, true);
520 set_capacity(ub->ub_disk, p->dev_sectors);
523 static void ublk_dev_param_discard_apply(struct ublk_device *ub)
525 struct request_queue *q = ub->ub_disk->queue;
526 const struct ublk_param_discard *p = &ub->params.discard;
528 q->limits.discard_alignment = p->discard_alignment;
529 q->limits.discard_granularity = p->discard_granularity;
530 blk_queue_max_discard_sectors(q, p->max_discard_sectors);
531 blk_queue_max_write_zeroes_sectors(q,
532 p->max_write_zeroes_sectors);
533 blk_queue_max_discard_segments(q, p->max_discard_segments);
536 static int ublk_validate_params(const struct ublk_device *ub)
538 /* basic param is the only one which must be set */
539 if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
540 const struct ublk_param_basic *p = &ub->params.basic;
542 if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
545 if (p->logical_bs_shift > p->physical_bs_shift)
548 if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
551 if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)
556 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
557 const struct ublk_param_discard *p = &ub->params.discard;
559 /* So far, only single-segment discard is supported */
560 if (p->max_discard_sectors && p->max_discard_segments != 1)
563 if (!p->discard_granularity)
567 /* dev_t is read-only */
568 if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
571 if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
572 return ublk_dev_param_zoned_validate(ub);
573 else if (ublk_dev_is_zoned(ub))
579 static int ublk_apply_params(struct ublk_device *ub)
581 if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
584 ublk_dev_param_basic_apply(ub);
586 if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
587 ublk_dev_param_discard_apply(ub);
589 if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
590 return ublk_dev_param_zoned_apply(ub);
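/*
 * Illustrative daemon-side sketch, not from the original source: a minimal
 * server typically sets UBLK_PARAM_TYPE_BASIC via the UBLK_CMD_SET_PARAMS
 * control command before START_DEV, e.g. (field names assumed from
 * ublk_cmd.h, values only an example)
 *
 *      struct ublk_params p = {
 *              .len = sizeof(p),
 *              .types = UBLK_PARAM_TYPE_BASIC,
 *              .basic = {
 *                      .logical_bs_shift = 9,
 *                      .physical_bs_shift = 12,
 *                      .max_sectors = 1024,
 *                      .dev_sectors = 2097152,
 *              },
 *      };
 *
 * ublk_validate_params() above enforces the constraints on these fields.
 */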
595 static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
597 return ubq->flags & UBLK_F_USER_COPY;
600 static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
603 * read()/write() is involved in user copy, so request reference
606 return ublk_support_user_copy(ubq);
609 static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
612 if (ublk_need_req_ref(ubq)) {
613 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
615 kref_init(&data->ref);
619 static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
622 if (ublk_need_req_ref(ubq)) {
623 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
625 return kref_get_unless_zero(&data->ref);
631 static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
634 if (ublk_need_req_ref(ubq)) {
635 struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
637 kref_put(&data->ref, ublk_complete_rq);
639 __ublk_complete_rq(req);
643 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
645 return ubq->flags & UBLK_F_NEED_GET_DATA;
648 static struct ublk_device *ublk_get_device(struct ublk_device *ub)
650 if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
655 static void ublk_put_device(struct ublk_device *ub)
657 put_device(&ub->cdev_dev);
660 static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
663 return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
666 static inline bool ublk_rq_has_data(const struct request *rq)
668 return bio_has_data(rq->bio);
671 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
674 return (struct ublksrv_io_desc *)
675 &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
678 static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
680 return ublk_get_queue(ub, q_id)->io_cmd_buf;
683 static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
685 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
687 return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
691 static inline bool ublk_queue_can_use_recovery_reissue(
692 struct ublk_queue *ubq)
694 return (ubq->flags & UBLK_F_USER_RECOVERY) &&
695 (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
698 static inline bool ublk_queue_can_use_recovery(
699 struct ublk_queue *ubq)
701 return ubq->flags & UBLK_F_USER_RECOVERY;
704 static inline bool ublk_can_use_recovery(struct ublk_device *ub)
706 return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
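/*
 * Illustrative note, not from the original source: with UBLK_F_USER_RECOVERY
 * a dying daemon leads to quiescing the device instead of stopping it. A new
 * daemon is then expected to issue the START_USER_RECOVERY control command,
 * re-fetch every tag with FETCH_REQ, and finish with END_USER_RECOVERY, after
 * which the queue is unquiesced and requeued requests are dispatched again.
 */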
709 static void ublk_free_disk(struct gendisk *disk)
711 struct ublk_device *ub = disk->private_data;
713 clear_bit(UB_STATE_USED, &ub->state);
714 put_device(&ub->cdev_dev);
717 static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
718 unsigned int *owner_gid)
723 current_uid_gid(&uid, &gid);
725 *owner_uid = from_kuid(&init_user_ns, uid);
726 *owner_gid = from_kgid(&init_user_ns, gid);
729 static int ublk_open(struct gendisk *disk, blk_mode_t mode)
731 struct ublk_device *ub = disk->private_data;
733 if (capable(CAP_SYS_ADMIN))
737 * If it is an unprivileged device, only the owner can open
738 * the disk. Otherwise it could be a trap set up by a
739 * malicious user who deliberately grants this disk's
740 * privileges to other users.
742 * This is also reasonable given that anyone can create an
743 * unprivileged device, without needing anyone else's grant.
745 if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
746 unsigned int curr_uid, curr_gid;
748 ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
750 if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
751 ub->dev_info.owner_gid)
758 static const struct block_device_operations ub_fops = {
759 .owner = THIS_MODULE,
761 .free_disk = ublk_free_disk,
762 .report_zones = ublk_report_zones,
765 #define UBLK_MAX_PIN_PAGES 32
767 struct ublk_io_iter {
768 struct page *pages[UBLK_MAX_PIN_PAGES];
770 struct bvec_iter iter;
773 /* copy io data between the bio vectors and the pinned page array */
774 static void ublk_copy_io_pages(struct ublk_io_iter *data,
775 size_t total, size_t pg_off, int dir)
780 while (done < total) {
781 struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
782 unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
783 (unsigned)(PAGE_SIZE - pg_off));
784 void *bv_buf = bvec_kmap_local(&bv);
785 void *pg_buf = kmap_local_page(data->pages[pg_idx]);
787 if (dir == ITER_DEST)
788 memcpy(pg_buf + pg_off, bv_buf, bytes);
790 memcpy(bv_buf, pg_buf + pg_off, bytes);
792 kunmap_local(pg_buf);
793 kunmap_local(bv_buf);
795 /* advance page array */
797 if (pg_off == PAGE_SIZE) {
805 bio_advance_iter_single(data->bio, &data->iter, bytes);
806 if (!data->iter.bi_size) {
807 data->bio = data->bio->bi_next;
808 if (data->bio == NULL)
810 data->iter = data->bio->bi_iter;
815 static bool ublk_advance_io_iter(const struct request *req,
816 struct ublk_io_iter *iter, unsigned int offset)
818 struct bio *bio = req->bio;
821 if (bio->bi_iter.bi_size > offset) {
823 iter->iter = bio->bi_iter;
824 bio_advance_iter(iter->bio, &iter->iter, offset);
827 offset -= bio->bi_iter.bi_size;
833 * Copy data between request pages and io_iter, where 'offset'
834 * is the starting linear offset within the request.
836 static size_t ublk_copy_user_pages(const struct request *req,
837 unsigned offset, struct iov_iter *uiter, int dir)
839 struct ublk_io_iter iter;
842 if (!ublk_advance_io_iter(req, &iter, offset))
845 while (iov_iter_count(uiter) && iter.bio) {
851 len = iov_iter_get_pages2(uiter, iter.pages,
852 iov_iter_count(uiter),
853 UBLK_MAX_PIN_PAGES, &off);
857 ublk_copy_io_pages(&iter, len, off, dir);
858 nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
859 for (i = 0; i < nr_pages; i++) {
860 if (dir == ITER_DEST)
861 set_page_dirty(iter.pages[i]);
862 put_page(iter.pages[i]);
870 static inline bool ublk_need_map_req(const struct request *req)
872 return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
875 static inline bool ublk_need_unmap_req(const struct request *req)
877 return ublk_rq_has_data(req) &&
878 (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
881 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
884 const unsigned int rq_bytes = blk_rq_bytes(req);
886 if (ublk_support_user_copy(ubq))
890 * no zero copy: we delay copying WRITE request data into the ublksrv
891 * context, and the big benefit is that pinning pages in the current
892 * context is pretty fast, see ublk_pin_user_pages
894 if (ublk_need_map_req(req)) {
895 struct iov_iter iter;
897 const int dir = ITER_DEST;
899 import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
902 return ublk_copy_user_pages(req, 0, &iter, dir);
907 static int ublk_unmap_io(const struct ublk_queue *ubq,
908 const struct request *req,
911 const unsigned int rq_bytes = blk_rq_bytes(req);
913 if (ublk_support_user_copy(ubq))
916 if (ublk_need_unmap_req(req)) {
917 struct iov_iter iter;
919 const int dir = ITER_SOURCE;
921 WARN_ON_ONCE(io->res > rq_bytes);
923 import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
925 return ublk_copy_user_pages(req, 0, &iter, dir);
930 static inline unsigned int ublk_req_build_flags(struct request *req)
934 if (req->cmd_flags & REQ_FAILFAST_DEV)
935 flags |= UBLK_IO_F_FAILFAST_DEV;
937 if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
938 flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
940 if (req->cmd_flags & REQ_FAILFAST_DRIVER)
941 flags |= UBLK_IO_F_FAILFAST_DRIVER;
943 if (req->cmd_flags & REQ_META)
944 flags |= UBLK_IO_F_META;
946 if (req->cmd_flags & REQ_FUA)
947 flags |= UBLK_IO_F_FUA;
949 if (req->cmd_flags & REQ_NOUNMAP)
950 flags |= UBLK_IO_F_NOUNMAP;
952 if (req->cmd_flags & REQ_SWAP)
953 flags |= UBLK_IO_F_SWAP;
958 static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
960 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
961 struct ublk_io *io = &ubq->ios[req->tag];
962 enum req_op op = req_op(req);
965 if (!ublk_queue_is_zoned(ubq) &&
966 (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
967 return BLK_STS_IOERR;
969 switch (req_op(req)) {
971 ublk_op = UBLK_IO_OP_READ;
974 ublk_op = UBLK_IO_OP_WRITE;
977 ublk_op = UBLK_IO_OP_FLUSH;
980 ublk_op = UBLK_IO_OP_DISCARD;
982 case REQ_OP_WRITE_ZEROES:
983 ublk_op = UBLK_IO_OP_WRITE_ZEROES;
986 if (ublk_queue_is_zoned(ubq))
987 return ublk_setup_iod_zoned(ubq, req);
988 return BLK_STS_IOERR;
991 /* need to translate since kernel req_op values may change across releases */
992 iod->op_flags = ublk_op | ublk_req_build_flags(req);
993 iod->nr_sectors = blk_rq_sectors(req);
994 iod->start_sector = blk_rq_pos(req);
995 iod->addr = io->addr;
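/*
 * Illustrative daemon-side sketch, not from the original source: once the
 * UBLK_IO_RES_OK cqe arrives, the server reads the descriptor filled above
 * from the mmap'ed command buffer, e.g. (helper names assumed from
 * ublk_cmd.h)
 *
 *      const struct ublksrv_io_desc *iod = &iods[tag];
 *      __u8 op = ublksrv_get_op(iod);
 *      __u32 len = iod->nr_sectors << 9;
 *      __u64 off = iod->start_sector << 9;
 *
 * and then serves the IO against its backing store.
 */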
1000 static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
1001 struct io_uring_cmd *ioucmd)
1003 return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
1006 static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
1008 return ubq->ubq_daemon->flags & PF_EXITING;
1011 /* todo: handle partial completion */
1012 static inline void __ublk_complete_rq(struct request *req)
1014 struct ublk_queue *ubq = req->mq_hctx->driver_data;
1015 struct ublk_io *io = &ubq->ios[req->tag];
1016 unsigned int unmapped_bytes;
1017 blk_status_t res = BLK_STS_OK;
1019 /* called from ublk_abort_queue() code path */
1020 if (io->flags & UBLK_IO_FLAG_ABORTED) {
1021 res = BLK_STS_IOERR;
1025 /* fail the read IO if nothing was read */
1026 if (!io->res && req_op(req) == REQ_OP_READ)
1030 res = errno_to_blk_status(io->res);
1035 * FLUSH, DISCARD or WRITE_ZEROES usually won't return a byte count, so end them
1038 * None of them needs unmapping.
1040 if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&
1041 req_op(req) != REQ_OP_DRV_IN)
1044 /* for READ request, writing data in iod->addr to rq buffers */
1045 unmapped_bytes = ublk_unmap_io(ubq, req, io);
1048 * Extremely unlikely since we got the data filled in just before
1050 * Re-read simply for this unlikely case.
1052 if (unlikely(unmapped_bytes < io->res))
1053 io->res = unmapped_bytes;
1055 if (blk_update_request(req, BLK_STS_OK, io->res))
1056 blk_mq_requeue_request(req, true);
1058 __blk_mq_end_request(req, BLK_STS_OK);
1062 blk_mq_end_request(req, res);
1065 static void ublk_complete_rq(struct kref *ref)
1067 struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
1069 struct request *req = blk_mq_rq_from_pdu(data);
1071 __ublk_complete_rq(req);
1075 * Since __ublk_rq_task_work always fails requests immediately during
1076 * exiting, __ublk_fail_req() is only called from abort context during
1077 * exiting. So lock is unnecessary.
1079 * Also, aborting may not have started yet; keep in mind that a failed
1080 * request may be issued by the block layer again.
1082 static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
1083 struct request *req)
1085 WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
1087 if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
1088 io->flags |= UBLK_IO_FLAG_ABORTED;
1089 if (ublk_queue_can_use_recovery_reissue(ubq))
1090 blk_mq_requeue_request(req, false);
1092 ublk_put_req_ref(ubq, req);
1096 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
1097 unsigned issue_flags)
1099 /* mark this cmd owned by ublksrv */
1100 io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
1103 * clear ACTIVE since we are done with this sqe/cmd slot
1104 * We can only accept a new io cmd when it is not active.
1106 io->flags &= ~UBLK_IO_FLAG_ACTIVE;
1108 /* tell ublksrv one io request is coming */
1109 io_uring_cmd_done(io->cmd, res, 0, issue_flags);
1112 #define UBLK_REQUEUE_DELAY_MS 3
1114 static inline void __ublk_abort_rq(struct ublk_queue *ubq,
1117 /* We cannot process this rq so just requeue it. */
1118 if (ublk_queue_can_use_recovery(ubq))
1119 blk_mq_requeue_request(rq, false);
1121 blk_mq_end_request(rq, BLK_STS_IOERR);
1123 mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
1126 static inline void __ublk_rq_task_work(struct request *req,
1127 unsigned issue_flags)
1129 struct ublk_queue *ubq = req->mq_hctx->driver_data;
1131 struct ublk_io *io = &ubq->ios[tag];
1132 unsigned int mapped_bytes;
1134 pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
1135 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1136 ublk_get_iod(ubq, req->tag)->addr);
1139 * Task is exiting if either:
1141 * (1) current != ubq_daemon.
1142 * io_uring_cmd_complete_in_task() tries to run task_work
1143 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
1145 * (2) current->flags & PF_EXITING.
1147 if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
1148 __ublk_abort_rq(ubq, req);
1152 if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
1154 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
1155 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
1158 if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
1159 io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
1160 pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
1161 __func__, io->cmd->cmd_op, ubq->q_id,
1162 req->tag, io->flags);
1163 ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
1167 * We have handled UBLK_IO_NEED_GET_DATA command,
1168 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
1171 io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
1172 /* update iod->addr because ublksrv may have passed a new io buffer */
1173 ublk_get_iod(ubq, req->tag)->addr = io->addr;
1174 pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
1175 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1176 ublk_get_iod(ubq, req->tag)->addr);
1179 mapped_bytes = ublk_map_io(ubq, req, io);
1181 /* partially mapped, update io descriptor */
1182 if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
1184 * Nothing mapped, retry until we succeed.
1186 * We may never succeed in mapping any bytes here because
1187 * of OOM. TODO: reserve one buffer with single page pinned
1188 * for providing forward progress guarantee.
1190 if (unlikely(!mapped_bytes)) {
1191 blk_mq_requeue_request(req, false);
1192 blk_mq_delay_kick_requeue_list(req->q,
1193 UBLK_REQUEUE_DELAY_MS);
1197 ublk_get_iod(ubq, req->tag)->nr_sectors =
1201 ublk_init_req_ref(ubq, req);
1202 ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
1205 static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
1206 unsigned issue_flags)
1208 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
1209 struct ublk_rq_data *data, *tmp;
1211 io_cmds = llist_reverse_order(io_cmds);
1212 llist_for_each_entry_safe(data, tmp, io_cmds, node)
1213 __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
1216 static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
1218 struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
1219 struct ublk_rq_data *data, *tmp;
1221 llist_for_each_entry_safe(data, tmp, io_cmds, node)
1222 __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
1225 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
1227 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1228 struct ublk_queue *ubq = pdu->ubq;
1230 ublk_forward_io_cmds(ubq, issue_flags);
1233 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
1235 struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
1238 if (!llist_add(&data->node, &ubq->io_cmds))
1241 io = &ubq->ios[rq->tag];
1243 * If the check passes, we know that this is a re-issued request aborted
1244 * previously in monitor_work because the ubq_daemon (cmd's task) is
1245 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
1246 * because this ioucmd's io_uring context may be freed now if no inflight
1247 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
1249 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request (releasing
1250 * the tag). Then the request is re-started (allocating the tag) and we are here.
1251 * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
1252 * guarantees that this is a re-issued request aborted previously.
1254 if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
1255 ublk_abort_io_cmds(ubq);
1257 struct io_uring_cmd *cmd = io->cmd;
1258 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1261 io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
1265 static enum blk_eh_timer_return ublk_timeout(struct request *rq)
1267 struct ublk_queue *ubq = rq->mq_hctx->driver_data;
1269 if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
1270 if (!ubq->timeout) {
1271 send_sig(SIGKILL, ubq->ubq_daemon, 0);
1272 ubq->timeout = true;
1278 return BLK_EH_RESET_TIMER;
1281 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
1282 const struct blk_mq_queue_data *bd)
1284 struct ublk_queue *ubq = hctx->driver_data;
1285 struct request *rq = bd->rq;
1288 /* fill iod to slot in io cmd buffer */
1289 res = ublk_setup_iod(ubq, rq);
1290 if (unlikely(res != BLK_STS_OK))
1291 return BLK_STS_IOERR;
1293 /* With recovery feature enabled, force_abort is set in
1294 * ublk_stop_dev() before calling del_gendisk(). We have to
1295 * abort all requeued and new rqs here to let del_gendisk()
1296 * move on. Besides, we must not call io_uring_cmd_complete_in_task()
1297 * to avoid UAF on io_uring ctx.
1299 * Note: force_abort is guaranteed to be seen because it is set
1300 * before the request queue is unquiesced.
1302 if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
1303 return BLK_STS_IOERR;
1305 blk_mq_start_request(bd->rq);
1307 if (unlikely(ubq_daemon_is_dying(ubq))) {
1308 __ublk_abort_rq(ubq, rq);
1312 ublk_queue_cmd(ubq, rq);
1317 static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1318 unsigned int hctx_idx)
1320 struct ublk_device *ub = driver_data;
1321 struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
1323 hctx->driver_data = ubq;
1327 static const struct blk_mq_ops ublk_mq_ops = {
1328 .queue_rq = ublk_queue_rq,
1329 .init_hctx = ublk_init_hctx,
1330 .timeout = ublk_timeout,
1333 static int ublk_ch_open(struct inode *inode, struct file *filp)
1335 struct ublk_device *ub = container_of(inode->i_cdev,
1336 struct ublk_device, cdev);
1338 if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
1340 filp->private_data = ub;
1344 static int ublk_ch_release(struct inode *inode, struct file *filp)
1346 struct ublk_device *ub = filp->private_data;
1348 clear_bit(UB_STATE_OPEN, &ub->state);
1352 /* map pre-allocated per-queue cmd buffer to ublksrv daemon */
1353 static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
1355 struct ublk_device *ub = filp->private_data;
1356 size_t sz = vma->vm_end - vma->vm_start;
1357 unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
1358 unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
1361 spin_lock(&ub->mm_lock);
1363 ub->mm = current->mm;
1364 if (current->mm != ub->mm)
1366 spin_unlock(&ub->mm_lock);
1371 if (vma->vm_flags & VM_WRITE)
1374 end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
1375 if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
1378 q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
1379 pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
1380 __func__, q_id, current->pid, vma->vm_start,
1381 phys_off, (unsigned long)sz);
1383 if (sz != ublk_queue_cmd_buf_size(ub, q_id))
1386 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
1387 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
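/*
 * Illustrative daemon-side sketch, not from the original source: the
 * per-queue descriptor array is typically mapped read-only like
 *
 *      off = UBLKSRV_CMD_BUF_OFFSET +
 *              q_id * UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
 *      len = round_up(queue_depth * sizeof(struct ublksrv_io_desc), page_size);
 *      iods = mmap(NULL, len, PROT_READ, MAP_SHARED, ublkc_fd, off);
 *
 * which matches the q_id/size checks performed above.
 */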
1390 static void ublk_commit_completion(struct ublk_device *ub,
1391 const struct ublksrv_io_cmd *ub_cmd)
1393 u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
1394 struct ublk_queue *ubq = ublk_get_queue(ub, qid);
1395 struct ublk_io *io = &ubq->ios[tag];
1396 struct request *req;
1398 /* now this cmd slot is owned by the ublk driver */
1399 io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
1400 io->res = ub_cmd->result;
1402 /* find the io request and complete */
1403 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
1404 if (WARN_ON_ONCE(unlikely(!req)))
1407 if (req_op(req) == REQ_OP_ZONE_APPEND)
1408 req->__sector = ub_cmd->zone_append_lba;
1410 if (likely(!blk_should_fake_timeout(req->q)))
1411 ublk_put_req_ref(ubq, req);
1415 * When ->ubq_daemon is exiting, either a new request is ended immediately,
1416 * or any queued io command is drained, so it is safe to abort the queue
1419 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
1423 for (i = 0; i < ubq->q_depth; i++) {
1424 struct ublk_io *io = &ubq->ios[i];
1426 if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
1430 * Either we fail the request or ublk_rq_task_work_fn
1433 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1435 __ublk_fail_req(ubq, io, rq);
1440 static void ublk_daemon_monitor_work(struct work_struct *work)
1442 struct ublk_device *ub =
1443 container_of(work, struct ublk_device, monitor_work.work);
1446 for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
1447 struct ublk_queue *ubq = ublk_get_queue(ub, i);
1449 if (ubq_daemon_is_dying(ubq)) {
1450 if (ublk_queue_can_use_recovery(ubq))
1451 schedule_work(&ub->quiesce_work);
1453 schedule_work(&ub->stop_work);
1455 /* aborting the queue is for making forward progress */
1456 ublk_abort_queue(ub, ubq);
1461 * We can't schedule monitor work once ub's state is no longer UBLK_S_DEV_LIVE,
1462 * i.e. after ublk_remove() or __ublk_quiesce_dev() is started.
1464 * No need for ub->mutex: monitor work is canceled after the state is marked
1465 * as not LIVE, so the new state is observed reliably.
1467 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1468 schedule_delayed_work(&ub->monitor_work,
1469 UBLK_DAEMON_MONITOR_PERIOD);
1472 static inline bool ublk_queue_ready(struct ublk_queue *ubq)
1474 return ubq->nr_io_ready == ubq->q_depth;
1477 static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
1479 io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
1482 static void ublk_cancel_queue(struct ublk_queue *ubq)
1486 if (!ublk_queue_ready(ubq))
1489 for (i = 0; i < ubq->q_depth; i++) {
1490 struct ublk_io *io = &ubq->ios[i];
1492 if (io->flags & UBLK_IO_FLAG_ACTIVE)
1493 io_uring_cmd_complete_in_task(io->cmd,
1494 ublk_cmd_cancel_cb);
1497 /* all io commands are canceled */
1498 ubq->nr_io_ready = 0;
1501 /* Cancel all pending commands, must be called after del_gendisk() returns */
1502 static void ublk_cancel_dev(struct ublk_device *ub)
1506 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1507 ublk_cancel_queue(ublk_get_queue(ub, i));
1510 static bool ublk_check_inflight_rq(struct request *rq, void *data)
1514 if (blk_mq_request_started(rq)) {
1521 static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
1525 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
1528 blk_mq_tagset_busy_iter(&ub->tag_set,
1529 ublk_check_inflight_rq, &idle);
1532 msleep(UBLK_REQUEUE_DELAY_MS);
1536 static void __ublk_quiesce_dev(struct ublk_device *ub)
1538 pr_devel("%s: quiesce ub: dev_id %d state %s\n",
1539 __func__, ub->dev_info.dev_id,
1540 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1541 "LIVE" : "QUIESCED");
1542 blk_mq_quiesce_queue(ub->ub_disk->queue);
1543 ublk_wait_tagset_rqs_idle(ub);
1544 ub->dev_info.state = UBLK_S_DEV_QUIESCED;
1545 ublk_cancel_dev(ub);
1546 /* we are going to release the task_struct of ubq_daemon and reset
1547 * ->ubq_daemon to NULL. So in monitor_work, checking ubq_daemon would cause a UAF.
1548 * Besides, monitor_work is not necessary in QUIESCED state since we have
1549 * already scheduled quiesce_work and quiesced all ubqs.
1551 * Do not let monitor_work schedule itself if the state is QUIESCED. We cancel
1552 * it here and re-schedule it in END_USER_RECOVERY to avoid UAF.
1554 cancel_delayed_work_sync(&ub->monitor_work);
1557 static void ublk_quiesce_work_fn(struct work_struct *work)
1559 struct ublk_device *ub =
1560 container_of(work, struct ublk_device, quiesce_work);
1562 mutex_lock(&ub->mutex);
1563 if (ub->dev_info.state != UBLK_S_DEV_LIVE)
1565 __ublk_quiesce_dev(ub);
1567 mutex_unlock(&ub->mutex);
1570 static void ublk_unquiesce_dev(struct ublk_device *ub)
1574 pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
1575 __func__, ub->dev_info.dev_id,
1576 ub->dev_info.state == UBLK_S_DEV_LIVE ?
1577 "LIVE" : "QUIESCED");
1578 /* quiesce_work has run. We let requeued rqs be aborted
1579 * before running fallback_wq. "force_abort" must be seen
1580 * after the request queue is unquiesced. Then del_gendisk()
1583 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1584 ublk_get_queue(ub, i)->force_abort = true;
1586 blk_mq_unquiesce_queue(ub->ub_disk->queue);
1587 /* We may have requeued some rqs in ublk_quiesce_queue() */
1588 blk_mq_kick_requeue_list(ub->ub_disk->queue);
1591 static void ublk_stop_dev(struct ublk_device *ub)
1593 mutex_lock(&ub->mutex);
1594 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1596 if (ublk_can_use_recovery(ub)) {
1597 if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1598 __ublk_quiesce_dev(ub);
1599 ublk_unquiesce_dev(ub);
1601 del_gendisk(ub->ub_disk);
1602 ub->dev_info.state = UBLK_S_DEV_DEAD;
1603 ub->dev_info.ublksrv_pid = -1;
1604 put_disk(ub->ub_disk);
1607 ublk_cancel_dev(ub);
1608 mutex_unlock(&ub->mutex);
1609 cancel_delayed_work_sync(&ub->monitor_work);
1612 /* device can only be started after all IOs are ready */
1613 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1615 mutex_lock(&ub->mutex);
1617 if (ublk_queue_ready(ubq)) {
1618 ubq->ubq_daemon = current;
1619 get_task_struct(ubq->ubq_daemon);
1620 ub->nr_queues_ready++;
1622 if (capable(CAP_SYS_ADMIN))
1623 ub->nr_privileged_daemon++;
1625 if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
1626 complete_all(&ub->completion);
1627 mutex_unlock(&ub->mutex);
1630 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
1633 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1634 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1636 ublk_queue_cmd(ubq, req);
1639 static inline int ublk_check_cmd_op(u32 cmd_op)
1641 u32 ioc_type = _IOC_TYPE(cmd_op);
1643 if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
1646 if (ioc_type != 'u' && ioc_type != 0)
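/*
 * Illustrative note, not from the original source: with
 * UBLK_F_CMD_IOCTL_ENCODE the daemon issues ioctl-encoded opcodes such as
 *
 *      sqe->cmd_op = _IOWR('u', UBLK_IO_COMMIT_AND_FETCH_REQ,
 *                          struct ublksrv_io_cmd);
 *
 * (UBLK_U_IO_COMMIT_AND_FETCH_REQ in ublk_cmd.h), while legacy plain opcodes
 * have _IOC_TYPE() == 0 and are only accepted when
 * CONFIG_BLKDEV_UBLK_LEGACY_OPCODES is enabled.
 */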
1652 static inline void ublk_fill_io_cmd(struct ublk_io *io,
1653 struct io_uring_cmd *cmd, unsigned long buf_addr)
1656 io->flags |= UBLK_IO_FLAG_ACTIVE;
1657 io->addr = buf_addr;
1660 static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
1661 unsigned int issue_flags,
1662 const struct ublksrv_io_cmd *ub_cmd)
1664 struct ublk_device *ub = cmd->file->private_data;
1665 struct ublk_queue *ubq;
1667 u32 cmd_op = cmd->cmd_op;
1668 unsigned tag = ub_cmd->tag;
1670 struct request *req;
1672 pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
1673 __func__, cmd->cmd_op, ub_cmd->q_id, tag,
1676 if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
1679 ubq = ublk_get_queue(ub, ub_cmd->q_id);
1680 if (!ubq || ub_cmd->q_id != ubq->q_id)
1683 if (ubq->ubq_daemon && ubq->ubq_daemon != current)
1686 if (tag >= ubq->q_depth)
1689 io = &ubq->ios[tag];
1691 /* there is pending io cmd, something must be wrong */
1692 if (io->flags & UBLK_IO_FLAG_ACTIVE) {
1698 * ensure that the user issues UBLK_IO_NEED_GET_DATA
1699 * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
1701 if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
1702 ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
1705 ret = ublk_check_cmd_op(cmd_op);
1710 switch (_IOC_NR(cmd_op)) {
1711 case UBLK_IO_FETCH_REQ:
1712 /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
1713 if (ublk_queue_ready(ubq)) {
1718 * The io is being handled by server, so COMMIT_RQ is expected
1719 * instead of FETCH_REQ
1721 if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
1724 if (!ublk_support_user_copy(ubq)) {
1726 * FETCH_REQ has to provide an IO buffer if NEED GET
1727 * DATA is not enabled
1729 if (!ub_cmd->addr && !ublk_need_get_data(ubq))
1731 } else if (ub_cmd->addr) {
1732 /* User copy requires addr to be unset */
1737 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1738 ublk_mark_io_ready(ub, ubq);
1740 case UBLK_IO_COMMIT_AND_FETCH_REQ:
1741 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
1743 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1746 if (!ublk_support_user_copy(ubq)) {
1748 * COMMIT_AND_FETCH_REQ has to provide an IO buffer if
1749 * NEED GET DATA is not enabled or it is a Read IO.
1751 if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
1752 req_op(req) == REQ_OP_READ))
1754 } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
1756 * User copy requires addr to be unset when command is
1763 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1764 ublk_commit_completion(ub, ub_cmd);
1766 case UBLK_IO_NEED_GET_DATA:
1767 if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1769 ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1770 ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
1775 return -EIOCBQUEUED;
1778 io_uring_cmd_done(cmd, ret, 0, issue_flags);
1779 pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
1780 __func__, cmd_op, tag, ret, io->flags);
1781 return -EIOCBQUEUED;
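/*
 * Illustrative daemon-side sketch, not from the original source: each tag is
 * driven by one IORING_OP_URING_CMD sqe (SQE128 ring) against /dev/ublkcN,
 * roughly
 *
 *      struct ublksrv_io_cmd *c = (struct ublksrv_io_cmd *)sqe->cmd;
 *
 *      sqe->fd = ublkc_fd;
 *      sqe->opcode = IORING_OP_URING_CMD;
 *      sqe->cmd_op = UBLK_U_IO_FETCH_REQ;
 *      c->q_id = q_id;
 *      c->tag = tag;
 *      c->addr = (__u64)(uintptr_t)io_buf;
 *
 * and is re-submitted as COMMIT_AND_FETCH_REQ with c->result filled in once
 * the IO has been served.
 */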
1784 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
1785 struct ublk_queue *ubq, int tag, size_t offset)
1787 struct request *req;
1789 if (!ublk_need_req_ref(ubq))
1792 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
1796 if (!ublk_get_req_ref(ubq, req))
1799 if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
1802 if (!ublk_rq_has_data(req))
1805 if (offset > blk_rq_bytes(req))
1810 ublk_put_req_ref(ubq, req);
1814 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
1817 * Not necessary for async retry, but let's keep it simple and always
1818 * copy the values to avoid any potential reuse.
1820 const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
1821 const struct ublksrv_io_cmd ub_cmd = {
1822 .q_id = READ_ONCE(ub_src->q_id),
1823 .tag = READ_ONCE(ub_src->tag),
1824 .result = READ_ONCE(ub_src->result),
1825 .addr = READ_ONCE(ub_src->addr)
1828 return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
1831 static inline bool ublk_check_ubuf_dir(const struct request *req,
1834 /* copy ubuf to request pages */
1835 if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) &&
1836 ubuf_dir == ITER_SOURCE)
1839 /* copy request pages to ubuf */
1840 if ((req_op(req) == REQ_OP_WRITE ||
1841 req_op(req) == REQ_OP_ZONE_APPEND) &&
1842 ubuf_dir == ITER_DEST)
1848 static struct request *ublk_check_and_get_req(struct kiocb *iocb,
1849 struct iov_iter *iter, size_t *off, int dir)
1851 struct ublk_device *ub = iocb->ki_filp->private_data;
1852 struct ublk_queue *ubq;
1853 struct request *req;
1858 return ERR_PTR(-EACCES);
1860 if (!user_backed_iter(iter))
1861 return ERR_PTR(-EACCES);
1863 if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1864 return ERR_PTR(-EACCES);
1866 tag = ublk_pos_to_tag(iocb->ki_pos);
1867 q_id = ublk_pos_to_hwq(iocb->ki_pos);
1868 buf_off = ublk_pos_to_buf_off(iocb->ki_pos);
1870 if (q_id >= ub->dev_info.nr_hw_queues)
1871 return ERR_PTR(-EINVAL);
1873 ubq = ublk_get_queue(ub, q_id);
1875 return ERR_PTR(-EINVAL);
1877 if (tag >= ubq->q_depth)
1878 return ERR_PTR(-EINVAL);
1880 req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
1882 return ERR_PTR(-EINVAL);
1884 if (!req->mq_hctx || !req->mq_hctx->driver_data)
1887 if (!ublk_check_ubuf_dir(req, dir))
1893 ublk_put_req_ref(ubq, req);
1894 return ERR_PTR(-EACCES);
1897 static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
1899 struct ublk_queue *ubq;
1900 struct request *req;
1904 req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
1906 return PTR_ERR(req);
1908 ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
1909 ubq = req->mq_hctx->driver_data;
1910 ublk_put_req_ref(ubq, req);
1915 static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
1917 struct ublk_queue *ubq;
1918 struct request *req;
1922 req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
1924 return PTR_ERR(req);
1926 ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
1927 ubq = req->mq_hctx->driver_data;
1928 ublk_put_req_ref(ubq, req);
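/*
 * Illustrative daemon-side sketch, not from the original source: with
 * UBLK_F_USER_COPY the server moves IO payloads through the char device
 * instead of a pre-registered buffer, e.g.
 *
 *      off = UBLKSRV_IO_BUF_OFFSET +
 *              (((__u64)q_id << UBLK_QID_OFF) | ((__u64)tag << UBLK_TAG_OFF));
 *      pread(ublkc_fd, buf, len, off);    for a WRITE request: fetch payload
 *      pwrite(ublkc_fd, buf, len, off);   for a READ request: return payload
 *
 * which is served by ublk_ch_read_iter()/ublk_ch_write_iter() above.
 */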
1933 static const struct file_operations ublk_ch_fops = {
1934 .owner = THIS_MODULE,
1935 .open = ublk_ch_open,
1936 .release = ublk_ch_release,
1937 .llseek = no_llseek,
1938 .read_iter = ublk_ch_read_iter,
1939 .write_iter = ublk_ch_write_iter,
1940 .uring_cmd = ublk_ch_uring_cmd,
1941 .mmap = ublk_ch_mmap,
1944 static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
1946 int size = ublk_queue_cmd_buf_size(ub, q_id);
1947 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1949 if (ubq->ubq_daemon)
1950 put_task_struct(ubq->ubq_daemon);
1951 if (ubq->io_cmd_buf)
1952 free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
1955 static int ublk_init_queue(struct ublk_device *ub, int q_id)
1957 struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1958 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
1962 ubq->flags = ub->dev_info.flags;
1964 ubq->q_depth = ub->dev_info.queue_depth;
1965 size = ublk_queue_cmd_buf_size(ub, q_id);
1967 ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
1971 ubq->io_cmd_buf = ptr;
1976 static void ublk_deinit_queues(struct ublk_device *ub)
1978 int nr_queues = ub->dev_info.nr_hw_queues;
1984 for (i = 0; i < nr_queues; i++)
1985 ublk_deinit_queue(ub, i);
1986 kfree(ub->__queues);
1989 static int ublk_init_queues(struct ublk_device *ub)
1991 int nr_queues = ub->dev_info.nr_hw_queues;
1992 int depth = ub->dev_info.queue_depth;
1993 int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
1994 int i, ret = -ENOMEM;
1996 ub->queue_size = ubq_size;
1997 ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
2001 for (i = 0; i < nr_queues; i++) {
2002 if (ublk_init_queue(ub, i))
2006 init_completion(&ub->completion);
2010 ublk_deinit_queues(ub);
2014 static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
2019 spin_lock(&ublk_idr_lock);
2020 /* allocate id, if @id >= 0, we're requesting that specific id */
2022 err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
2026 err = idr_alloc(&ublk_index_idr, ub, 0, UBLK_MAX_UBLKS,
2029 spin_unlock(&ublk_idr_lock);
2032 ub->ub_number = err;
2037 static void ublk_free_dev_number(struct ublk_device *ub)
2039 spin_lock(&ublk_idr_lock);
2040 idr_remove(&ublk_index_idr, ub->ub_number);
2041 wake_up_all(&ublk_idr_wq);
2042 spin_unlock(&ublk_idr_lock);
2045 static void ublk_cdev_rel(struct device *dev)
2047 struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
2049 blk_mq_free_tag_set(&ub->tag_set);
2050 ublk_deinit_queues(ub);
2051 ublk_free_dev_number(ub);
2052 mutex_destroy(&ub->mutex);
2056 static int ublk_add_chdev(struct ublk_device *ub)
2058 struct device *dev = &ub->cdev_dev;
2059 int minor = ub->ub_number;
2062 dev->parent = ublk_misc.this_device;
2063 dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
2064 dev->class = &ublk_chr_class;
2065 dev->release = ublk_cdev_rel;
2066 device_initialize(dev);
2068 ret = dev_set_name(dev, "ublkc%d", minor);
2072 cdev_init(&ub->cdev, &ublk_ch_fops);
2073 ret = cdev_device_add(&ub->cdev, dev);
2084 static void ublk_stop_work_fn(struct work_struct *work)
2086 struct ublk_device *ub =
2087 container_of(work, struct ublk_device, stop_work);
2092 /* align max io buffer size with PAGE_SIZE */
2093 static void ublk_align_max_io_size(struct ublk_device *ub)
2095 unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
2097 ub->dev_info.max_io_buf_bytes =
2098 round_down(max_io_bytes, PAGE_SIZE);
2101 static int ublk_add_tag_set(struct ublk_device *ub)
2103 ub->tag_set.ops = &ublk_mq_ops;
2104 ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
2105 ub->tag_set.queue_depth = ub->dev_info.queue_depth;
2106 ub->tag_set.numa_node = NUMA_NO_NODE;
2107 ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
2108 ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2109 ub->tag_set.driver_data = ub;
2110 return blk_mq_alloc_tag_set(&ub->tag_set);
2113 static void ublk_remove(struct ublk_device *ub)
2116 cancel_work_sync(&ub->stop_work);
2117 cancel_work_sync(&ub->quiesce_work);
2118 cdev_device_del(&ub->cdev, &ub->cdev_dev);
2119 put_device(&ub->cdev_dev);
2123 static struct ublk_device *ublk_get_device_from_id(int idx)
2125 struct ublk_device *ub = NULL;
2130 spin_lock(&ublk_idr_lock);
2131 ub = idr_find(&ublk_index_idr, idx);
2133 ub = ublk_get_device(ub);
2134 spin_unlock(&ublk_idr_lock);
2139 static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
2141 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2142 int ublksrv_pid = (int)header->data[0];
2143 struct gendisk *disk;
2146 if (ublksrv_pid <= 0)
2149 if (wait_for_completion_interruptible(&ub->completion) != 0)
2152 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
2154 mutex_lock(&ub->mutex);
2155 if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
2156 test_bit(UB_STATE_USED, &ub->state)) {
2161 disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
2163 ret = PTR_ERR(disk);
2166 sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
2167 disk->fops = &ub_fops;
2168 disk->private_data = ub;
2170 ub->dev_info.ublksrv_pid = ublksrv_pid;
2173 ret = ublk_apply_params(ub);
2177 /* don't probe partitions if any ubq daemon is untrusted */
2178 if (ub->nr_privileged_daemon != ub->nr_queues_ready)
2179 set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
2181 get_device(&ub->cdev_dev);
2182 ub->dev_info.state = UBLK_S_DEV_LIVE;
2184 if (ublk_dev_is_zoned(ub)) {
2185 ret = ublk_revalidate_disk_zones(ub);
2190 ret = add_disk(disk);
2194 set_bit(UB_STATE_USED, &ub->state);
2198 ub->dev_info.state = UBLK_S_DEV_DEAD;
2199 ublk_put_device(ub);
2205 mutex_unlock(&ub->mutex);
2209 static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
2210 struct io_uring_cmd *cmd)
2212 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2213 void __user *argp = (void __user *)(unsigned long)header->addr;
2214 cpumask_var_t cpumask;
2215 unsigned long queue;
2216 unsigned int retlen;
2220 if (header->len * BITS_PER_BYTE < nr_cpu_ids)
2222 if (header->len & (sizeof(unsigned long)-1))
2227 queue = header->data[0];
2228 if (queue >= ub->dev_info.nr_hw_queues)
2231 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
2234 for_each_possible_cpu(i) {
2235 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
2236 cpumask_set_cpu(i, cpumask);
2240 retlen = min_t(unsigned short, header->len, cpumask_size());
2241 if (copy_to_user(argp, cpumask, retlen))
2242 goto out_free_cpumask;
2243 if (retlen != header->len &&
2244 clear_user(argp + retlen, header->len - retlen))
2245 goto out_free_cpumask;
2249 free_cpumask_var(cpumask);
2253 static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
2255 pr_devel("%s: dev id %d flags %llx\n", __func__,
2256 info->dev_id, info->flags);
2257 pr_devel("\t nr_hw_queues %d queue_depth %d\n",
2258 info->nr_hw_queues, info->queue_depth);
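/*
 * Illustrative daemon-side sketch, not from the original source: ADD_DEV is
 * sent as an IORING_OP_URING_CMD against /dev/ublk-control, roughly
 *
 *      struct ublksrv_ctrl_dev_info info = {
 *              .nr_hw_queues = 1,
 *              .queue_depth = 128,
 *              .max_io_buf_bytes = 512 << 10,
 *              .dev_id = -1,
 *      };
 *      struct ublksrv_ctrl_cmd *c = (struct ublksrv_ctrl_cmd *)sqe->cmd;
 *
 *      sqe->cmd_op = UBLK_U_CMD_ADD_DEV;
 *      c->dev_id = -1;
 *      c->addr = (__u64)(uintptr_t)&info;
 *      c->len = sizeof(info);
 *
 * ublk_ctrl_add_dev() below copies the info back with the negotiated flags.
 */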
2261 static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
2263 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2264 void __user *argp = (void __user *)(unsigned long)header->addr;
2265 struct ublksrv_ctrl_dev_info info;
2266 struct ublk_device *ub;
2269 if (header->len < sizeof(info) || !header->addr)
2271 if (header->queue_id != (u16)-1) {
2272 pr_warn("%s: queue_id is wrong %x\n",
2273 __func__, header->queue_id);
2277 if (copy_from_user(&info, argp, sizeof(info)))
2280 if (capable(CAP_SYS_ADMIN))
2281 info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
2282 else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
2286 * an unprivileged device can't be trusted, but RECOVERY and
2287 * RECOVERY_REISSUE may still hang error handling, so we can't
2288 * support recovery features for unprivileged ublk now
2290 * TODO: provide forward progress for the RECOVERY handler, so that
2291 * unprivileged devices can benefit from it
2293 if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
2294 info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
2295 UBLK_F_USER_RECOVERY);
2297 /* the created device is always owned by current user */
2298 ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
2300 if (header->dev_id != info.dev_id) {
2301 pr_warn("%s: dev id not match %u %u\n",
2302 __func__, header->dev_id, info.dev_id);
2306 if (header->dev_id != U32_MAX && header->dev_id >= UBLK_MAX_UBLKS) {
2307 pr_warn("%s: dev id is too large. Max supported is %d\n",
2308 __func__, UBLK_MAX_UBLKS - 1);
2312 ublk_dump_dev_info(&info);
2314 ret = mutex_lock_killable(&ublk_ctl_mutex);
2319 if (ublks_added >= ublks_max)
2323 ub = kzalloc(sizeof(*ub), GFP_KERNEL);
2326 mutex_init(&ub->mutex);
2327 spin_lock_init(&ub->mm_lock);
2328 INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
2329 INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
2330 INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
2332 ret = ublk_alloc_dev_number(ub, header->dev_id);
2336 memcpy(&ub->dev_info, &info, sizeof(info));
2338 /* update device id */
2339 ub->dev_info.dev_id = ub->ub_number;
2342 * 64bit flags will be copied back to userspace as the feature
2343 * negotiation result, so we have to clear flags which the driver
2344 * doesn't support yet, so that userspace gets the correct flags
2345 * (features) to handle.
2347 ub->dev_info.flags &= UBLK_F_ALL;
2349 ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
2350 UBLK_F_URING_CMD_COMP_IN_TASK;
2352 /* GET_DATA isn't needed any more with USER_COPY */
2353 if (ublk_dev_is_user_copy(ub))
2354 ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
2356 /* Zoned storage support requires user copy feature */
2357 if (ublk_dev_is_zoned(ub) &&
2358 (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
2360 goto out_free_dev_number;
2363 /* We are not ready to support zero copy */
2364 ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
2366 ub->dev_info.nr_hw_queues = min_t(unsigned int,
2367 ub->dev_info.nr_hw_queues, nr_cpu_ids);
2368 ublk_align_max_io_size(ub);
2370 ret = ublk_init_queues(ub);
2372 goto out_free_dev_number;
2374 ret = ublk_add_tag_set(ub);
2376 goto out_deinit_queues;
2379 if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
2380 goto out_free_tag_set;
2383 * Add the char dev so that the ublksrv daemon can be set up.
2384 * ublk_add_chdev() will clean up everything if it fails.
2386 ret = ublk_add_chdev(ub);
2390 blk_mq_free_tag_set(&ub->tag_set);
2392 ublk_deinit_queues(ub);
2393 out_free_dev_number:
2394 ublk_free_dev_number(ub);
2396 mutex_destroy(&ub->mutex);
2399 mutex_unlock(&ublk_ctl_mutex);
2403 static inline bool ublk_idr_freed(int id)
2407 spin_lock(&ublk_idr_lock);
2408 ptr = idr_find(&ublk_index_idr, id);
2409 spin_unlock(&ublk_idr_lock);
2414 static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
2416 struct ublk_device *ub = *p_ub;
2417 int idx = ub->ub_number;
2420 ret = mutex_lock_killable(&ublk_ctl_mutex);
2424 if (!test_bit(UB_STATE_DELETED, &ub->state)) {
2426 set_bit(UB_STATE_DELETED, &ub->state);
2429 /* Mark the reference as consumed */
2431 ublk_put_device(ub);
2432 mutex_unlock(&ublk_ctl_mutex);
2435 * Wait until the idr is removed, then it can be reused after the
2436 * DEL_DEV command returns.
2438 * If we return because of a user interrupt, a future delete command
2441 * - the device number isn't freed, this device won't or needn't
2442 * be deleted again, since UB_STATE_DELETED is set, and device
2443 * will be released after the last reference is dropped
2445 * - the device number is freed already, we will not find this
2446 * device via ublk_get_device_from_id()
2448 if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
2453 static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
2455 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2457 pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
2458 __func__, cmd->cmd_op, header->dev_id, header->queue_id,
2459 header->data[0], header->addr, header->len);
2462 static int ublk_ctrl_stop_dev(struct ublk_device *ub)
2465 cancel_work_sync(&ub->stop_work);
2466 cancel_work_sync(&ub->quiesce_work);
2471 static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
2472 struct io_uring_cmd *cmd)
2474 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2475 void __user *argp = (void __user *)(unsigned long)header->addr;
2477 if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
2480 if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
2486 /* TYPE_DEVT is read-only, so fill it in before returning to userspace */
2487 static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
2489 ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
2490 ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
2493 ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
2494 ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
2496 ub->params.devt.disk_major = 0;
2497 ub->params.devt.disk_minor = 0;
2499 ub->params.types |= UBLK_PARAM_TYPE_DEVT;
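/*
 * Userspace can fetch these via GET_PARAMS and locate the device nodes
 * itself, for example (illustrative) building /dev/ublkc<N> from
 * devt.char_major/char_minor with mknod(), or resolving them through
 * /sys/dev/char/<major>:<minor> and /sys/dev/block/<major>:<minor>.
 */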
2502 static int ublk_ctrl_get_params(struct ublk_device *ub,
2503 struct io_uring_cmd *cmd)
2505 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2506 void __user *argp = (void __user *)(unsigned long)header->addr;
2507 struct ublk_params_header ph;
2510 if (header->len <= sizeof(ph) || !header->addr)
2513 if (copy_from_user(&ph, argp, sizeof(ph)))
2516 if (ph.len > header->len || !ph.len)
2519 if (ph.len > sizeof(struct ublk_params))
2520 ph.len = sizeof(struct ublk_params);
2522 mutex_lock(&ub->mutex);
2523 ublk_ctrl_fill_params_devt(ub);
2524 if (copy_to_user(argp, &ub->params, ph.len))
2528 mutex_unlock(&ub->mutex);
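/*
 * Example (userspace sketch, illustrative only): the first two fields of
 * struct ublk_params double as the ublk_params_header, so userspace sets
 * ->len to the size of its own struct and the driver copies back at most
 * that many bytes, keeping old and new binaries compatible.
 *
 *	struct ublk_params p = { .len = sizeof(p) };
 *
 *	// on an SQE prepared as in the ADD_DEV example earlier:
 *	sqe->cmd_op	= UBLK_U_CMD_GET_PARAMS;
 *	cmd->dev_id	= dev_id;
 *	cmd->addr	= (__u64)(uintptr_t)&p;
 *	cmd->len	= sizeof(p);
 *
 * After completion, p.types says which unions are valid, e.g.
 * (p.types & UBLK_PARAM_TYPE_DEVT) means p.devt carries the char and block
 * device numbers filled in by ublk_ctrl_fill_params_devt() above.
 */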
2533 static int ublk_ctrl_set_params(struct ublk_device *ub,
2534 struct io_uring_cmd *cmd)
2536 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2537 void __user *argp = (void __user *)(unsigned long)header->addr;
2538 struct ublk_params_header ph;
2541 if (header->len <= sizeof(ph) || !header->addr)
2544 if (copy_from_user(&ph, argp, sizeof(ph)))
2547 if (ph.len > header->len || !ph.len || !ph.types)
2550 if (ph.len > sizeof(struct ublk_params))
2551 ph.len = sizeof(struct ublk_params);
2553 /* parameters can only be changed when device isn't live */
2554 mutex_lock(&ub->mutex);
2555 if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
2557 } else if (copy_from_user(&ub->params, argp, ph.len)) {
2560 /* clear all we don't support yet */
2561 ub->params.types &= UBLK_PARAM_TYPE_ALL;
2562 ret = ublk_validate_params(ub);
2564 ub->params.types = 0;
2566 mutex_unlock(&ub->mutex);
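/*
 * Example (userspace sketch, illustrative only): SET_PARAMS uses the same
 * header protocol and is only accepted while the device isn't live, i.e.
 * typically between ADD_DEV and START_DEV.
 *
 *	struct ublk_params p = {
 *		.len	= sizeof(p),
 *		.types	= UBLK_PARAM_TYPE_BASIC,
 *		.basic	= {
 *			.logical_bs_shift	= 9,
 *			.physical_bs_shift	= 12,
 *			.io_min_shift		= 9,
 *			.io_opt_shift		= 12,
 *			.max_sectors		= 1024,
 *			.dev_sectors		= 2097152,	// 1 GiB backing size
 *		},
 *	};
 *
 * Point cmd->addr/cmd->len at &p/sizeof(p) and submit UBLK_U_CMD_SET_PARAMS
 * exactly as in the GET_PARAMS sketch above.
 */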
2571 static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
2575 WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
2576 /* All old ioucmds have to be completed */
2577 WARN_ON_ONCE(ubq->nr_io_ready);
2578 /* old daemon is PF_EXITING, put it now */
2579 put_task_struct(ubq->ubq_daemon);
2580 /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
2581 ubq->ubq_daemon = NULL;
2582 ubq->timeout = false;
2584 for (i = 0; i < ubq->q_depth; i++) {
2585 struct ublk_io *io = &ubq->ios[i];
2587 /* forget everything now and be ready for new FETCH_REQ */
2594 static int ublk_ctrl_start_recovery(struct ublk_device *ub,
2595 struct io_uring_cmd *cmd)
2597 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2601 mutex_lock(&ub->mutex);
2602 if (!ublk_can_use_recovery(ub))
2605	 * START_RECOVERY is only allowed after:
2607	 * (1) UB_STATE_OPEN is not set, which means the dying process has exited
2608	 *     and the related io_uring ctx is freed, so the file struct of /dev/ublkcX has been released;
2611	 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
2612	 *     (a) has quiesced the request queue,
2613	 *     (b) has requeued every inflight request whose io_flags is ACTIVE,
2614	 *     (c) has requeued/aborted every inflight request whose io_flags is NOT ACTIVE,
2615	 *     (d) has completed/canceled all ioucmds owned by the dying process.
2617 if (test_bit(UB_STATE_OPEN, &ub->state) ||
2618 ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2622 pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
2623 for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
2624 ublk_queue_reinit(ub, ublk_get_queue(ub, i));
2625	/* set ub->mm to NULL, otherwise the new ubq_daemon cannot mmap the io_cmd_buf */
2627 ub->nr_queues_ready = 0;
2628 ub->nr_privileged_daemon = 0;
2629 init_completion(&ub->completion);
2632 mutex_unlock(&ub->mutex);
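/*
 * Recovery handshake as seen from a new ublk server process (illustrative
 * summary, not a complete implementation):
 *
 *	1. issue UBLK_U_CMD_START_USER_RECOVERY on /dev/ublk-control;
 *	2. open /dev/ublkc<N>, mmap the per-queue io_cmd_buf and issue
 *	   UBLK_U_IO_FETCH_REQ for every tag of every queue, exactly like a
 *	   fresh start; once all queues are ready this completes the
 *	   ub->completion initialized above;
 *	3. issue UBLK_U_CMD_END_USER_RECOVERY with the new daemon pid in
 *	   data[0]; ublk_ctrl_end_recovery() below then unquiesces the queue,
 *	   kicks the requeue list and marks the device live again.
 */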
2636 static int ublk_ctrl_end_recovery(struct ublk_device *ub,
2637 struct io_uring_cmd *cmd)
2639 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2640 int ublksrv_pid = (int)header->data[0];
2643	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) to be ready, dev id %d...\n",
2644 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
2645	/* wait until the new ubq_daemon has sent all FETCH_REQs */
2646 if (wait_for_completion_interruptible(&ub->completion))
2649 pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
2650 __func__, ub->dev_info.nr_hw_queues, header->dev_id);
2652 mutex_lock(&ub->mutex);
2653 if (!ublk_can_use_recovery(ub))
2656 if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2660 ub->dev_info.ublksrv_pid = ublksrv_pid;
2661 pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
2662 __func__, ublksrv_pid, header->dev_id);
2663 blk_mq_unquiesce_queue(ub->ub_disk->queue);
2664 pr_devel("%s: queue unquiesced, dev id %d.\n",
2665 __func__, header->dev_id);
2666 blk_mq_kick_requeue_list(ub->ub_disk->queue);
2667 ub->dev_info.state = UBLK_S_DEV_LIVE;
2668 schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
2671 mutex_unlock(&ub->mutex);
2675 static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
2677 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2678 void __user *argp = (void __user *)(unsigned long)header->addr;
2679 u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;
2681 if (header->len != UBLK_FEATURES_LEN || !header->addr)
2684 if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
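/*
 * Example (userspace sketch, illustrative only): probing driver features
 * before ADD_DEV, with the SQE set up as in the ADD_DEV example earlier.
 *
 *	__u64 features = 0;
 *
 *	sqe->cmd_op	= UBLK_U_CMD_GET_FEATURES;
 *	cmd->addr	= (__u64)(uintptr_t)&features;
 *	cmd->len	= UBLK_FEATURES_LEN;
 *	io_uring_submit_and_wait(&ring, 1);
 *
 * Afterwards 'features' holds the UBLK_F_* bits this driver supports, so a
 * server can test e.g. (features & UBLK_F_USER_RECOVERY) before relying on
 * recovery support.
 */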
2691 * All control commands are sent via /dev/ublk-control, so we have to check
2692 * the destination device's permission
2694 static int ublk_char_dev_permission(struct ublk_device *ub,
2695 const char *dev_path, int mask)
2701 err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
2705 err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
2710 if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
2713 err = inode_permission(&nop_mnt_idmap,
2714 d_backing_inode(path.dentry), mask);
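/*
 * For UBLK_F_UNPRIVILEGED_DEV devices the control command payload is laid
 * out as follows (see ublk_ctrl_uring_cmd_permission() below):
 *
 *	header->addr -> +---------------------------------------------+
 *			| char dev path, header->dev_path_len bytes, |
 *			| NUL terminated                              |
 *			+---------------------------------------------+
 *			| command-specific payload (dev_info, params |
 *			| ...), header->len - header->dev_path_len   |
 *			| bytes                                       |
 *			+---------------------------------------------+
 *
 * Once the path-based permission check passes, addr/len are advanced past
 * the path so each command handler only sees its own payload.
 */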
2720 static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
2721 struct io_uring_cmd *cmd)
2723 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
2724 bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
2725 void __user *argp = (void __user *)(unsigned long)header->addr;
2726 char *dev_path = NULL;
2730 if (!unprivileged) {
2731 if (!capable(CAP_SYS_ADMIN))
2734	 * The newly added UBLK_CMD_GET_DEV_INFO2 command carries
2735	 * char_dev_path in its payload too, since userspace may not
2736	 * know whether the specified device was created in unprivileged mode.
2739 if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
2744	 * The user has to provide the char device path for an unprivileged ublk device:
2746	 * header->addr always points to the dev path buffer, and
2747	 * header->dev_path_len records the length of that buffer.
2749 if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
2752 if (header->len < header->dev_path_len)
2755 dev_path = memdup_user_nul(argp, header->dev_path_len);
2756 if (IS_ERR(dev_path))
2757 return PTR_ERR(dev_path);
2760 switch (_IOC_NR(cmd->cmd_op)) {
2761 case UBLK_CMD_GET_DEV_INFO:
2762 case UBLK_CMD_GET_DEV_INFO2:
2763 case UBLK_CMD_GET_QUEUE_AFFINITY:
2764 case UBLK_CMD_GET_PARAMS:
2765 case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
2768 case UBLK_CMD_START_DEV:
2769 case UBLK_CMD_STOP_DEV:
2770 case UBLK_CMD_ADD_DEV:
2771 case UBLK_CMD_DEL_DEV:
2772 case UBLK_CMD_SET_PARAMS:
2773 case UBLK_CMD_START_USER_RECOVERY:
2774 case UBLK_CMD_END_USER_RECOVERY:
2775 mask = MAY_READ | MAY_WRITE;
2781 ret = ublk_char_dev_permission(ub, dev_path, mask);
2783 header->len -= header->dev_path_len;
2784 header->addr += header->dev_path_len;
2786 pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
2787 __func__, ub->ub_number, cmd->cmd_op,
2788 ub->dev_info.owner_uid, ub->dev_info.owner_gid,
2795 static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
2796 unsigned int issue_flags)
2798 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2799 struct ublk_device *ub = NULL;
2800 u32 cmd_op = cmd->cmd_op;
2803 if (issue_flags & IO_URING_F_NONBLOCK)
2806 ublk_ctrl_cmd_dump(cmd);
2808 if (!(issue_flags & IO_URING_F_SQE128))
2811 ret = ublk_check_cmd_op(cmd_op);
2815 if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
2816 ret = ublk_ctrl_get_features(cmd);
2820 if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
2822 ub = ublk_get_device_from_id(header->dev_id);
2826 ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
2831 switch (_IOC_NR(cmd_op)) {
2832 case UBLK_CMD_START_DEV:
2833 ret = ublk_ctrl_start_dev(ub, cmd);
2835 case UBLK_CMD_STOP_DEV:
2836 ret = ublk_ctrl_stop_dev(ub);
2838 case UBLK_CMD_GET_DEV_INFO:
2839 case UBLK_CMD_GET_DEV_INFO2:
2840 ret = ublk_ctrl_get_dev_info(ub, cmd);
2842 case UBLK_CMD_ADD_DEV:
2843 ret = ublk_ctrl_add_dev(cmd);
2845 case UBLK_CMD_DEL_DEV:
2846 ret = ublk_ctrl_del_dev(&ub);
2848 case UBLK_CMD_GET_QUEUE_AFFINITY:
2849 ret = ublk_ctrl_get_queue_affinity(ub, cmd);
2851 case UBLK_CMD_GET_PARAMS:
2852 ret = ublk_ctrl_get_params(ub, cmd);
2854 case UBLK_CMD_SET_PARAMS:
2855 ret = ublk_ctrl_set_params(ub, cmd);
2857 case UBLK_CMD_START_USER_RECOVERY:
2858 ret = ublk_ctrl_start_recovery(ub, cmd);
2860 case UBLK_CMD_END_USER_RECOVERY:
2861 ret = ublk_ctrl_end_recovery(ub, cmd);
2870 ublk_put_device(ub);
2872 io_uring_cmd_done(cmd, ret, 0, issue_flags);
2873 pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
2874 __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
2875 return -EIOCBQUEUED;
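/*
 * Note: the command has already been completed via io_uring_cmd_done()
 * above; returning -EIOCBQUEUED just tells the io_uring core not to post
 * a second completion for this SQE.
 */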
2878 static const struct file_operations ublk_ctl_fops = {
2879 .open = nonseekable_open,
2880 .uring_cmd = ublk_ctrl_uring_cmd,
2881 .owner = THIS_MODULE,
2882 .llseek = noop_llseek,
2885 static struct miscdevice ublk_misc = {
2886 .minor = MISC_DYNAMIC_MINOR,
2887 .name = "ublk-control",
2888 .fops = &ublk_ctl_fops,
2891 static int __init ublk_init(void)
2895 BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
2896 UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
2898 init_waitqueue_head(&ublk_idr_wq);
2900 ret = misc_register(&ublk_misc);
2904 ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
2906 goto unregister_mis;
2908 ret = class_register(&ublk_chr_class);
2910 goto free_chrdev_region;
2915 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
2917 misc_deregister(&ublk_misc);
2921 static void __exit ublk_exit(void)
2923 struct ublk_device *ub;
2926 idr_for_each_entry(&ublk_index_idr, ub, id)
2929 class_unregister(&ublk_chr_class);
2930 misc_deregister(&ublk_misc);
2932 idr_destroy(&ublk_index_idr);
2933 unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
2936 module_init(ublk_init);
2937 module_exit(ublk_exit);
2939 static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp)
2941 return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
2944 static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
2946 return sysfs_emit(buf, "%u\n", ublks_max);
2949 static const struct kernel_param_ops ublk_max_ublks_ops = {
2950 .set = ublk_set_max_ublks,
2951 .get = ublk_get_max_ublks,
2954 module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
2955 MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add (default: 64)");
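/*
 * ublks_max can also be adjusted at runtime, e.g. (illustrative):
 *
 *	echo 128 > /sys/module/ublk_drv/parameters/ublks_max
 *
 * or set at load time with "modprobe ublk_drv ublks_max=128"; values are
 * restricted to [0, UBLK_MAX_UBLKS] by ublk_set_max_ublks() above.
 */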
2957 MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
2958 MODULE_LICENSE("GPL");