ublk: add timeout handler
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6368b56eacf11a6ac67a089c14b1435ad830b39d..e96309f2e1ada14e1aab06afc5231c36cdb903ff 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -42,6 +42,7 @@
 #include <linux/mm.h>
 #include <asm/page.h>
 #include <linux/task_work.h>
+#include <linux/namei.h>
 #include <uapi/linux/ublk_cmd.h>
 
 #define UBLK_MINORS            (1U << MINORBITS)
                | UBLK_F_URING_CMD_COMP_IN_TASK \
                | UBLK_F_NEED_GET_DATA \
                | UBLK_F_USER_RECOVERY \
-               | UBLK_F_USER_RECOVERY_REISSUE)
+               | UBLK_F_USER_RECOVERY_REISSUE \
+               | UBLK_F_UNPRIVILEGED_DEV \
+               | UBLK_F_CMD_IOCTL_ENCODE)
 
 /* All UBLK_PARAM_TYPE_* should be included here */
-#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
+#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
+               UBLK_PARAM_TYPE_DISCARD | UBLK_PARAM_TYPE_DEVT)
 
 struct ublk_rq_data {
        struct llist_node node;
@@ -125,6 +129,7 @@ struct ublk_queue {
        unsigned long io_addr;  /* mapped vm address */
        unsigned int max_io_sz;
        bool force_abort;
+       bool timeout;
        unsigned short nr_io_ready;     /* how many ios setup */
        struct ublk_device *dev;
        struct ublk_io ios[];
@@ -147,6 +152,7 @@ struct ublk_device {
 
 #define UB_STATE_OPEN          0
 #define UB_STATE_USED          1
+#define UB_STATE_DELETED       2
        unsigned long           state;
        int                     ub_number;
 
@@ -159,7 +165,7 @@ struct ublk_device {
 
        struct completion       completion;
        unsigned int            nr_queues_ready;
-       atomic_t                nr_aborted_queues;
+       unsigned int            nr_privileged_daemon;
 
        /*
         * Our ubq->daemon may be killed without any notification, so
@@ -185,6 +191,15 @@ static wait_queue_head_t ublk_idr_wq;      /* wait until one idr is freed */
 
 static DEFINE_MUTEX(ublk_ctl_mutex);
 
+/*
+ * Max number of ublk devices allowed to be added
+ *
+ * It can be extended to a per-user limit in the future, or even be
+ * controlled by cgroup.
+ */
+static unsigned int ublks_max = 64;
+static unsigned int ublks_added;       /* protected by ublk_ctl_mutex */
+
 static struct miscdevice ublk_misc;
 
 static void ublk_dev_param_basic_apply(struct ublk_device *ub)
@@ -255,6 +270,10 @@ static int ublk_validate_params(const struct ublk_device *ub)
                        return -EINVAL;
        }
 
+       /* dev_t is read-only */
+       if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
+               return -EINVAL;
+
        return 0;
 }
 
@@ -281,9 +300,7 @@ static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
 
 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
 {
-       if (ubq->flags & UBLK_F_NEED_GET_DATA)
-               return true;
-       return false;
+       return ubq->flags & UBLK_F_NEED_GET_DATA;
 }
 
 static struct ublk_device *ublk_get_device(struct ublk_device *ub)
@@ -306,7 +323,7 @@ static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
 
 static inline bool ublk_rq_has_data(const struct request *rq)
 {
-       return rq->bio && bio_has_data(rq->bio);
+       return bio_has_data(rq->bio);
 }
 
 static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
@@ -332,25 +349,19 @@ static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
 static inline bool ublk_queue_can_use_recovery_reissue(
                struct ublk_queue *ubq)
 {
-       if ((ubq->flags & UBLK_F_USER_RECOVERY) &&
-                       (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE))
-               return true;
-       return false;
+       return (ubq->flags & UBLK_F_USER_RECOVERY) &&
+                       (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
 }
 
 static inline bool ublk_queue_can_use_recovery(
                struct ublk_queue *ubq)
 {
-       if (ubq->flags & UBLK_F_USER_RECOVERY)
-               return true;
-       return false;
+       return ubq->flags & UBLK_F_USER_RECOVERY;
 }
 
 static inline bool ublk_can_use_recovery(struct ublk_device *ub)
 {
-       if (ub->dev_info.flags & UBLK_F_USER_RECOVERY)
-               return true;
-       return false;
+       return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
 }
 
 static void ublk_free_disk(struct gendisk *disk)
@@ -361,18 +372,59 @@ static void ublk_free_disk(struct gendisk *disk)
        put_device(&ub->cdev_dev);
 }
 
+static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
+               unsigned int *owner_gid)
+{
+       kuid_t uid;
+       kgid_t gid;
+
+       current_uid_gid(&uid, &gid);
+
+       *owner_uid = from_kuid(&init_user_ns, uid);
+       *owner_gid = from_kgid(&init_user_ns, gid);
+}
+
+static int ublk_open(struct block_device *bdev, fmode_t mode)
+{
+       struct ublk_device *ub = bdev->bd_disk->private_data;
+
+       if (capable(CAP_SYS_ADMIN))
+               return 0;
+
+       /*
+        * If it is an unprivileged device, only the owner can open
+        * the disk. Otherwise it could be a trap set up by a
+        * malicious user who deliberately grants this disk's
+        * privileges to other users.
+        *
+        * This restriction is also reasonable given that anyone can
+        * create an unprivileged device without needing anyone else's
+        * grant.
+        */
+       if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
+               unsigned int curr_uid, curr_gid;
+
+               ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
+
+               if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
+                               ub->dev_info.owner_gid)
+                       return -EPERM;
+       }
+
+       return 0;
+}
+
 static const struct block_device_operations ub_fops = {
        .owner =        THIS_MODULE,
+       .open =         ublk_open,
        .free_disk =    ublk_free_disk,
 };
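
The ublk_open() check above rejects a non-owner without CAP_SYS_ADMIN at open time. A minimal userspace sketch of observing that, assuming an unprivileged ublk disk /dev/ublkb0 owned by another user (the device name is an assumption, not part of this patch):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* expected to fail with EPERM when we are neither owner nor root */
        int fd = open("/dev/ublkb0", O_RDONLY);

        if (fd < 0) {
                printf("open /dev/ublkb0: %s\n", strerror(errno));
                return 1;
        }
        close(fd);
        return 0;
}
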
 
 #define UBLK_MAX_PIN_PAGES     32
 
 struct ublk_map_data {
-       const struct ublk_queue *ubq;
        const struct request *rq;
-       const struct ublk_io *io;
-       unsigned max_bytes;
+       unsigned long   ubuf;
+       unsigned int    len;
 };
 
 struct ublk_io_iter {
@@ -429,18 +481,17 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
        return done;
 }
 
-static inline int ublk_copy_user_pages(struct ublk_map_data *data,
-               bool to_vm)
+static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
 {
        const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
-       const unsigned long start_vm = data->io->addr;
+       const unsigned long start_vm = data->ubuf;
        unsigned int done = 0;
        struct ublk_io_iter iter = {
                .pg_off = start_vm & (PAGE_SIZE - 1),
                .bio    = data->rq->bio,
                .iter   = data->rq->bio->bi_iter,
        };
-       const unsigned int nr_pages = round_up(data->max_bytes +
+       const unsigned int nr_pages = round_up(data->len +
                        (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
 
        while (done < nr_pages) {
@@ -453,42 +504,49 @@ static inline int ublk_copy_user_pages(struct ublk_map_data *data,
                                iter.pages);
                if (iter.nr_pages <= 0)
                        return done == 0 ? iter.nr_pages : done;
-               len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
+               len = ublk_copy_io_pages(&iter, data->len, to_vm);
                for (i = 0; i < iter.nr_pages; i++) {
                        if (to_vm)
                                set_page_dirty(iter.pages[i]);
                        put_page(iter.pages[i]);
                }
-               data->max_bytes -= len;
+               data->len -= len;
                done += iter.nr_pages;
        }
 
        return done;
 }
 
+static inline bool ublk_need_map_req(const struct request *req)
+{
+       return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
+}
+
+static inline bool ublk_need_unmap_req(const struct request *req)
+{
+       return ublk_rq_has_data(req) && req_op(req) == REQ_OP_READ;
+}
+
 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
                struct ublk_io *io)
 {
        const unsigned int rq_bytes = blk_rq_bytes(req);
+
        /*
         * no zero copy, we delay copying WRITE request data into ublksrv
         * context and the big benefit is that pinning pages in the current
         * context is pretty fast, see ublk_copy_user_pages
         */
-       if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH)
-               return rq_bytes;
-
-       if (ublk_rq_has_data(req)) {
+       if (ublk_need_map_req(req)) {
                struct ublk_map_data data = {
-                       .ubq    =       ubq,
                        .rq     =       req,
-                       .io     =       io,
-                       .max_bytes =    rq_bytes,
+                       .ubuf   =       io->addr,
+                       .len    =       rq_bytes,
                };
 
                ublk_copy_user_pages(&data, true);
 
-               return rq_bytes - data.max_bytes;
+               return rq_bytes - data.len;
        }
        return rq_bytes;
 }
@@ -499,19 +557,18 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
 {
        const unsigned int rq_bytes = blk_rq_bytes(req);
 
-       if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
+       if (ublk_need_unmap_req(req)) {
                struct ublk_map_data data = {
-                       .ubq    =       ubq,
                        .rq     =       req,
-                       .io     =       io,
-                       .max_bytes =    io->res,
+                       .ubuf   =       io->addr,
+                       .len    =       io->res,
                };
 
                WARN_ON_ONCE(io->res > rq_bytes);
 
                ublk_copy_user_pages(&data, false);
 
-               return io->res - data.max_bytes;
+               return io->res - data.len;
        }
        return rq_bytes;
 }
@@ -596,26 +653,25 @@ static void ublk_complete_rq(struct request *req)
        struct ublk_queue *ubq = req->mq_hctx->driver_data;
        struct ublk_io *io = &ubq->ios[req->tag];
        unsigned int unmapped_bytes;
+       blk_status_t res = BLK_STS_OK;
 
        /* failed read IO if nothing is read */
        if (!io->res && req_op(req) == REQ_OP_READ)
                io->res = -EIO;
 
        if (io->res < 0) {
-               blk_mq_end_request(req, errno_to_blk_status(io->res));
-               return;
+               res = errno_to_blk_status(io->res);
+               goto exit;
        }
 
        /*
-        * FLUSH or DISCARD usually won't return bytes returned, so end them
+        * FLUSH, DISCARD or WRITE_ZEROES usually won't return valid data bytes, so end them
         * directly.
         *
         * None of them needs unmapping.
         */
-       if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE) {
-               blk_mq_end_request(req, BLK_STS_OK);
-               return;
-       }
+       if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE)
+               goto exit;
 
        /* for READ request, writing data in iod->addr to rq buffers */
        unmapped_bytes = ublk_unmap_io(ubq, req, io);
@@ -632,6 +688,10 @@ static void ublk_complete_rq(struct request *req)
                blk_mq_requeue_request(req, true);
        else
                __blk_mq_end_request(req, BLK_STS_OK);
+
+       return;
+exit:
+       blk_mq_end_request(req, res);
 }
 
 /*
@@ -710,9 +770,7 @@ static inline void __ublk_rq_task_work(struct request *req)
                return;
        }
 
-       if (ublk_need_get_data(ubq) &&
-                       (req_op(req) == REQ_OP_WRITE ||
-                       req_op(req) == REQ_OP_FLUSH)) {
+       if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
                /*
                 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
                 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
@@ -837,6 +895,22 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
        }
 }
 
+static enum blk_eh_timer_return ublk_timeout(struct request *rq)
+{
+       struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+
+       if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
+               if (!ubq->timeout) {
+                       send_sig(SIGKILL, ubq->ubq_daemon, 0);
+                       ubq->timeout = true;
+               }
+
+               return BLK_EH_DONE;
+       }
+
+       return BLK_EH_RESET_TIMER;
+}
+
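
For privileged devices the handler above just rearms the timer, so the effective policy stays with the block layer's per-queue request timeout. A minimal sketch of tuning that knob from userspace, assuming the disk is ublkb0 and using the generic queue/io_timeout sysfs attribute (milliseconds, commonly 30000 by default):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/block/ublkb0/queue/io_timeout", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fprintf(f, "60000\n");  /* allow 60s before ->timeout() fires */
        return fclose(f) ? 1 : 0;
}
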
 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
@@ -896,6 +970,7 @@ static const struct blk_mq_ops ublk_mq_ops = {
        .queue_rq       = ublk_queue_rq,
        .init_hctx      = ublk_init_hctx,
        .init_request   = ublk_init_rq,
+       .timeout        = ublk_timeout,
 };
 
 static int ublk_ch_open(struct inode *inode, struct file *filp)
@@ -1179,6 +1254,9 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
                ubq->ubq_daemon = current;
                get_task_struct(ubq->ubq_daemon);
                ub->nr_queues_ready++;
+
+               if (capable(CAP_SYS_ADMIN))
+                       ub->nr_privileged_daemon++;
        }
        if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
                complete_all(&ub->completion);
@@ -1194,6 +1272,19 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
        ublk_queue_cmd(ubq, req);
 }
 
+static inline int ublk_check_cmd_op(u32 cmd_op)
+{
+       u32 ioc_type = _IOC_TYPE(cmd_op);
+
+       if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
+               return -EOPNOTSUPP;
+
+       if (ioc_type != 'u' && ioc_type != 0)
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
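
ublk_check_cmd_op() accepts two encodings: the legacy plain constants (whose _IOC_TYPE is 0) and the new ioctl-style opcodes with type 'u'. A standalone sketch of the decoding; the 0x20 value and the _IOWR() packing follow the UAPI convention, but the struct here is a trimmed stand-in rather than the real <linux/ublk_cmd.h> definition:

#include <stdio.h>
#include <sys/ioctl.h>          /* _IOWR, _IOC_TYPE, _IOC_NR */

/* trimmed stand-in for struct ublksrv_io_cmd */
struct io_cmd_stub {
        unsigned short          q_id;
        unsigned short          tag;
        int                     result;
        unsigned long long      addr;
};

#define UBLK_IO_FETCH_REQ       0x20    /* legacy encoding, _IOC_TYPE == 0 */
#define UBLK_U_IO_FETCH_REQ     _IOWR('u', UBLK_IO_FETCH_REQ, struct io_cmd_stub)

int main(void)
{
        unsigned int op = UBLK_U_IO_FETCH_REQ;

        /* both encodings share the same _IOC_NR, which the switch below keys on */
        printf("type='%c' nr=0x%x\n", _IOC_TYPE(op), _IOC_NR(op));
        return 0;
}
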
 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 {
        struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
@@ -1203,14 +1294,12 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
        u32 cmd_op = cmd->cmd_op;
        unsigned tag = ub_cmd->tag;
        int ret = -EINVAL;
+       struct request *req;
 
        pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
                        __func__, cmd->cmd_op, ub_cmd->q_id, tag,
                        ub_cmd->result);
 
-       if (!(issue_flags & IO_URING_F_SQE128))
-               goto out;
-
        if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
                goto out;
 
@@ -1237,10 +1326,15 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
         * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
         */
        if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
-                       ^ (cmd_op == UBLK_IO_NEED_GET_DATA))
+                       ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
+               goto out;
+
+       ret = ublk_check_cmd_op(cmd_op);
+       if (ret)
                goto out;
 
-       switch (cmd_op) {
+       ret = -EINVAL;
+       switch (_IOC_NR(cmd_op)) {
        case UBLK_IO_FETCH_REQ:
                /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
                if (ublk_queue_ready(ubq)) {
@@ -1253,8 +1347,8 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
                 */
                if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
                        goto out;
-               /* FETCH_RQ has to provide IO buffer */
-               if (!ub_cmd->addr)
+               /* FETCH_RQ has to provide IO buffer if NEED_GET_DATA is not enabled */
+               if (!ub_cmd->addr && !ublk_need_get_data(ubq))
                        goto out;
                io->cmd = cmd;
                io->flags |= UBLK_IO_FLAG_ACTIVE;
@@ -1263,8 +1357,12 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
                ublk_mark_io_ready(ub, ubq);
                break;
        case UBLK_IO_COMMIT_AND_FETCH_REQ:
-               /* FETCH_RQ has to provide IO buffer */
-               if (!ub_cmd->addr)
+               req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
+               /*
+                * COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED_GET_DATA
+                * is not enabled or it is a READ request.
+                */
+               if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
                        goto out;
                if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
                        goto out;
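
On the ublksrv side these commands are issued as IORING_OP_URING_CMD SQEs on the /dev/ublkcN char device. A rough sketch with liburing, assuming the ring was created with IORING_SETUP_SQE128 and that UBLK_U_IO_FETCH_REQ and struct ublksrv_io_cmd come from the UAPI header; error handling is trimmed:

#include <liburing.h>
#include <string.h>
#include <linux/ublk_cmd.h>

/* queue one FETCH_REQ; buf is the per-tag IO buffer passed via ->addr */
static int queue_fetch_req(struct io_uring *ring, int char_fd,
                           __u16 q_id, __u16 tag, __u64 buf)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct ublksrv_io_cmd *cmd;

        if (!sqe)
                return -1;
        memset(sqe, 0, 2 * sizeof(*sqe));       /* 128-byte SQE */
        sqe->opcode = IORING_OP_URING_CMD;
        sqe->fd = char_fd;
        sqe->cmd_op = UBLK_U_IO_FETCH_REQ;      /* ioctl-encoded opcode */

        cmd = (struct ublksrv_io_cmd *)sqe->cmd;
        cmd->q_id = q_id;
        cmd->tag = tag;
        cmd->addr = buf;        /* may be 0 when UBLK_F_NEED_GET_DATA is set */
        return io_uring_submit(ring);
}
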
@@ -1433,6 +1531,8 @@ static int ublk_add_chdev(struct ublk_device *ub)
        ret = cdev_device_add(&ub->cdev, dev);
        if (ret)
                goto fail;
+
+       ublks_added++;
        return 0;
  fail:
        put_device(dev);
@@ -1475,6 +1575,7 @@ static void ublk_remove(struct ublk_device *ub)
        cancel_work_sync(&ub->quiesce_work);
        cdev_device_del(&ub->cdev, &ub->cdev_dev);
        put_device(&ub->cdev_dev);
+       ublks_added--;
 }
 
 static struct ublk_device *ublk_get_device_from_id(int idx)
@@ -1493,21 +1594,16 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
        return ub;
 }
 
-static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
 {
        struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
        int ublksrv_pid = (int)header->data[0];
-       struct ublk_device *ub;
        struct gendisk *disk;
        int ret = -EINVAL;
 
        if (ublksrv_pid <= 0)
                return -EINVAL;
 
-       ub = ublk_get_device_from_id(header->dev_id);
-       if (!ub)
-               return -EINVAL;
-
        wait_for_completion_interruptible(&ub->completion);
 
        schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
@@ -1519,7 +1615,7 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
                goto out_unlock;
        }
 
-       disk = blk_mq_alloc_disk(&ub->tag_set, ub);
+       disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
        if (IS_ERR(disk)) {
                ret = PTR_ERR(disk);
                goto out_unlock;
@@ -1535,6 +1631,10 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
        if (ret)
                goto out_put_disk;
 
+       /* don't probe partitions if any ubq daemon is untrusted */
+       if (ub->nr_privileged_daemon != ub->nr_queues_ready)
+               set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+
        get_device(&ub->cdev_dev);
        ret = add_disk(disk);
        if (ret) {
@@ -1552,21 +1652,20 @@ out_put_disk:
                put_disk(disk);
 out_unlock:
        mutex_unlock(&ub->mutex);
-       ublk_put_device(ub);
        return ret;
 }
 
-static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
+               struct io_uring_cmd *cmd)
 {
        struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
        void __user *argp = (void __user *)(unsigned long)header->addr;
-       struct ublk_device *ub;
        cpumask_var_t cpumask;
        unsigned long queue;
        unsigned int retlen;
        unsigned int i;
-       int ret = -EINVAL;
-       
+       int ret;
+
        if (header->len * BITS_PER_BYTE < nr_cpu_ids)
                return -EINVAL;
        if (header->len & (sizeof(unsigned long)-1))
@@ -1574,17 +1673,12 @@ static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
        if (!header->addr)
                return -EINVAL;
 
-       ub = ublk_get_device_from_id(header->dev_id);
-       if (!ub)
-               return -EINVAL;
-
        queue = header->data[0];
        if (queue >= ub->dev_info.nr_hw_queues)
-               goto out_put_device;
+               return -EINVAL;
 
-       ret = -ENOMEM;
        if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
-               goto out_put_device;
+               return -ENOMEM;
 
        for_each_possible_cpu(i) {
                if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
@@ -1602,8 +1696,6 @@ static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
        ret = 0;
 out_free_cpumask:
        free_cpumask_var(cpumask);
-out_put_device:
-       ublk_put_device(ub);
        return ret;
 }
 
@@ -1630,19 +1722,46 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
                        __func__, header->queue_id);
                return -EINVAL;
        }
+
        if (copy_from_user(&info, argp, sizeof(info)))
                return -EFAULT;
-       ublk_dump_dev_info(&info);
+
+       if (capable(CAP_SYS_ADMIN))
+               info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
+       else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
+               return -EPERM;
+
+       /*
+        * An unprivileged device can't be trusted, but RECOVERY and
+        * RECOVERY_REISSUE may still hang error handling, so the
+        * recovery features can't be supported for unprivileged ublk
+        * for now.
+        *
+        * TODO: provide forward progress for the RECOVERY handler, so
+        * that unprivileged devices can benefit from it.
+        */
+       if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
+               info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
+                               UBLK_F_USER_RECOVERY);
+
+       /* the created device is always owned by the current user */
+       ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
+
        if (header->dev_id != info.dev_id) {
                pr_warn("%s: dev id not match %u %u\n",
                        __func__, header->dev_id, info.dev_id);
                return -EINVAL;
        }
 
+       ublk_dump_dev_info(&info);
+
        ret = mutex_lock_killable(&ublk_ctl_mutex);
        if (ret)
                return ret;
 
+       ret = -EACCES;
+       if (ublks_added >= ublks_max)
+               goto out_unlock;
+
        ret = -ENOMEM;
        ub = kzalloc(sizeof(*ub), GFP_KERNEL);
        if (!ub)
@@ -1673,6 +1792,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
        if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
                ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
 
+       ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE;
+
        /* We are not ready to support zero copy */
        ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
 
@@ -1724,33 +1845,43 @@ static inline bool ublk_idr_freed(int id)
        return ptr == NULL;
 }
 
-static int ublk_ctrl_del_dev(int idx)
+static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
 {
-       struct ublk_device *ub;
+       struct ublk_device *ub = *p_ub;
+       int idx = ub->ub_number;
        int ret;
 
        ret = mutex_lock_killable(&ublk_ctl_mutex);
        if (ret)
                return ret;
 
-       ub = ublk_get_device_from_id(idx);
-       if (ub) {
+       if (!test_bit(UB_STATE_DELETED, &ub->state)) {
                ublk_remove(ub);
-               ublk_put_device(ub);
-               ret = 0;
-       } else {
-               ret = -ENODEV;
+               set_bit(UB_STATE_DELETED, &ub->state);
        }
 
+       /* Mark the reference as consumed */
+       *p_ub = NULL;
+       ublk_put_device(ub);
+       mutex_unlock(&ublk_ctl_mutex);
+
        /*
         * Wait until the idr is removed, then it can be reused after
         * DEL_DEV command is returned.
+        *
+        * If we return because of a user interrupt, a future delete
+        * command may come:
+        *
+        * - the device number isn't freed; this device won't (and needn't)
+        *   be deleted again, since UB_STATE_DELETED is set, and the
+        *   device will be released after the last reference is dropped
+        *
+        * - the device number is freed already; we will not find this
+        *   device via ublk_get_device_from_id()
         */
-       if (!ret)
-               wait_event(ublk_idr_wq, ublk_idr_freed(idx));
-       mutex_unlock(&ublk_ctl_mutex);
+       wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));
 
-       return ret;
+       return 0;
 }
 
 static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
@@ -1762,50 +1893,52 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
                        header->data[0], header->addr, header->len);
 }
 
-static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_stop_dev(struct ublk_device *ub)
 {
-       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
-       struct ublk_device *ub;
-
-       ub = ublk_get_device_from_id(header->dev_id);
-       if (!ub)
-               return -EINVAL;
-
        ublk_stop_dev(ub);
        cancel_work_sync(&ub->stop_work);
        cancel_work_sync(&ub->quiesce_work);
 
-       ublk_put_device(ub);
        return 0;
 }
 
-static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
+               struct io_uring_cmd *cmd)
 {
        struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
        void __user *argp = (void __user *)(unsigned long)header->addr;
-       struct ublk_device *ub;
-       int ret = 0;
 
        if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
                return -EINVAL;
 
-       ub = ublk_get_device_from_id(header->dev_id);
-       if (!ub)
-               return -EINVAL;
-
        if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
-               ret = -EFAULT;
-       ublk_put_device(ub);
+               return -EFAULT;
 
-       return ret;
+       return 0;
+}
+
+/* TYPE_DEVT is readonly, so fill it up before returning to userspace */
+static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
+{
+       ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
+       ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
+
+       if (ub->ub_disk) {
+               ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
+               ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
+       } else {
+               ub->params.devt.disk_major = 0;
+               ub->params.devt.disk_minor = 0;
+       }
+       ub->params.types |= UBLK_PARAM_TYPE_DEVT;
 }
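
Userspace can consume these read-only fields to derive the device nodes instead of guessing names. A small sketch with sample values standing in for what UBLK_CMD_GET_PARAMS would return in ublk_params.devt:

#include <stdio.h>
#include <sys/sysmacros.h>      /* makedev, major, minor */

int main(void)
{
        /* sample values; in practice taken from the GET_PARAMS result */
        unsigned int char_major = 240, char_minor = 0;
        dev_t cdev = makedev(char_major, char_minor);

        printf("char device node is %u:%u\n", major(cdev), minor(cdev));
        return 0;
}
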
 
-static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_params(struct ublk_device *ub,
+               struct io_uring_cmd *cmd)
 {
        struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
        void __user *argp = (void __user *)(unsigned long)header->addr;
        struct ublk_params_header ph;
-       struct ublk_device *ub;
        int ret;
 
        if (header->len <= sizeof(ph) || !header->addr)
@@ -1820,27 +1953,23 @@ static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
        if (ph.len > sizeof(struct ublk_params))
                ph.len = sizeof(struct ublk_params);
 
-       ub = ublk_get_device_from_id(header->dev_id);
-       if (!ub)
-               return -EINVAL;
-
        mutex_lock(&ub->mutex);
+       ublk_ctrl_fill_params_devt(ub);
        if (copy_to_user(argp, &ub->params, ph.len))
                ret = -EFAULT;
        else
                ret = 0;
        mutex_unlock(&ub->mutex);
 
-       ublk_put_device(ub);
        return ret;
 }
 
-static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
+static int ublk_ctrl_set_params(struct ublk_device *ub,
+               struct io_uring_cmd *cmd)
 {
        struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
        void __user *argp = (void __user *)(unsigned long)header->addr;
        struct ublk_params_header ph;
-       struct ublk_device *ub;
        int ret = -EFAULT;
 
        if (header->len <= sizeof(ph) || !header->addr)
@@ -1855,10 +1984,6 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
        if (ph.len > sizeof(struct ublk_params))
                ph.len = sizeof(struct ublk_params);
 
-       ub = ublk_get_device_from_id(header->dev_id);
-       if (!ub)
-               return -EINVAL;
-
        /* parameters can only be changed when device isn't live */
        mutex_lock(&ub->mutex);
        if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
@@ -1871,7 +1996,6 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
                ret = ublk_validate_params(ub);
        }
        mutex_unlock(&ub->mutex);
-       ublk_put_device(ub);
 
        return ret;
 }
@@ -1887,6 +2011,7 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
        put_task_struct(ubq->ubq_daemon);
        /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
        ubq->ubq_daemon = NULL;
+       ubq->timeout = false;
 
        for (i = 0; i < ubq->q_depth; i++) {
                struct ublk_io *io = &ubq->ios[i];
@@ -1898,17 +2023,13 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
        }
 }
 
-static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_recovery(struct ublk_device *ub,
+               struct io_uring_cmd *cmd)
 {
        struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
-       struct ublk_device *ub;
        int ret = -EINVAL;
        int i;
 
-       ub = ublk_get_device_from_id(header->dev_id);
-       if (!ub)
-               return ret;
-
        mutex_lock(&ub->mutex);
        if (!ublk_can_use_recovery(ub))
                goto out_unlock;
@@ -1936,25 +2057,21 @@ static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
        /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
        ub->mm = NULL;
        ub->nr_queues_ready = 0;
+       ub->nr_privileged_daemon = 0;
        init_completion(&ub->completion);
        ret = 0;
  out_unlock:
        mutex_unlock(&ub->mutex);
-       ublk_put_device(ub);
        return ret;
 }
 
-static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
+static int ublk_ctrl_end_recovery(struct ublk_device *ub,
+               struct io_uring_cmd *cmd)
 {
        struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
        int ublksrv_pid = (int)header->data[0];
-       struct ublk_device *ub;
        int ret = -EINVAL;
 
-       ub = ublk_get_device_from_id(header->dev_id);
-       if (!ub)
-               return ret;
-
        pr_devel("%s: Waiting for new ubq_daemons (nr: %d) to be ready, dev id %d...\n",
                        __func__, ub->dev_info.nr_hw_queues, header->dev_id);
        /* wait until new ubq_daemon sending all FETCH_REQ */
@@ -1982,7 +2099,115 @@ static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
        ret = 0;
  out_unlock:
        mutex_unlock(&ub->mutex);
-       ublk_put_device(ub);
+       return ret;
+}
+
+/*
+ * All control commands are sent via /dev/ublk-control, so we have to check
+ * the destination device's permission
+ */
+static int ublk_char_dev_permission(struct ublk_device *ub,
+               const char *dev_path, int mask)
+{
+       int err;
+       struct path path;
+       struct kstat stat;
+
+       err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
+       if (err)
+               return err;
+
+       err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+       if (err)
+               goto exit;
+
+       err = -EPERM;
+       if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
+               goto exit;
+
+       err = inode_permission(&nop_mnt_idmap,
+                       d_backing_inode(path.dentry), mask);
+exit:
+       path_put(&path);
+       return err;
+}
+
+static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
+               struct io_uring_cmd *cmd)
+{
+       struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
+       void __user *argp = (void __user *)(unsigned long)header->addr;
+       char *dev_path = NULL;
+       int ret = 0;
+       int mask;
+
+       if (!unprivileged) {
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+               /*
+                * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
+                * char_dev_path in its payload too, since userspace may not
+                * know whether the specified device was created in
+                * unprivileged mode.
+                */
+               if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
+                       return 0;
+       }
+
+       /*
+        * The user has to provide the char device path for an unprivileged
+        * ublk device.
+        *
+        * header->addr always points to the dev path buffer, and
+        * header->dev_path_len records the length of the dev path buffer.
+        */
+       if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
+               return -EINVAL;
+
+       if (header->len < header->dev_path_len)
+               return -EINVAL;
+
+       dev_path = kmalloc(header->dev_path_len + 1, GFP_KERNEL);
+       if (!dev_path)
+               return -ENOMEM;
+
+       ret = -EFAULT;
+       if (copy_from_user(dev_path, argp, header->dev_path_len))
+               goto exit;
+       dev_path[header->dev_path_len] = 0;
+
+       ret = -EINVAL;
+       switch (_IOC_NR(cmd->cmd_op)) {
+       case UBLK_CMD_GET_DEV_INFO:
+       case UBLK_CMD_GET_DEV_INFO2:
+       case UBLK_CMD_GET_QUEUE_AFFINITY:
+       case UBLK_CMD_GET_PARAMS:
+               mask = MAY_READ;
+               break;
+       case UBLK_CMD_START_DEV:
+       case UBLK_CMD_STOP_DEV:
+       case UBLK_CMD_ADD_DEV:
+       case UBLK_CMD_DEL_DEV:
+       case UBLK_CMD_SET_PARAMS:
+       case UBLK_CMD_START_USER_RECOVERY:
+       case UBLK_CMD_END_USER_RECOVERY:
+               mask = MAY_READ | MAY_WRITE;
+               break;
+       default:
+               goto exit;
+       }
+
+       ret = ublk_char_dev_permission(ub, dev_path, mask);
+       if (!ret) {
+               header->len -= header->dev_path_len;
+               header->addr += header->dev_path_len;
+       }
+       pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
+                       __func__, ub->ub_number, cmd->cmd_op,
+                       ub->dev_info.owner_uid, ub->dev_info.owner_gid,
+                       dev_path, ret);
+exit:
+       kfree(dev_path);
        return ret;
 }
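
Seen from the client, the control payload is therefore laid out as the char device path followed by the command's usual buffer, with header->addr and header->len adjusted past the path on success. A rough sketch of preparing that payload for GET_DEV_INFO2; struct and field names are taken from the UAPI header, and the caller is assumed to have sized the payload buffer for both parts:

#include <linux/ublk_cmd.h>
#include <stdint.h>
#include <string.h>

/* lay out [dev_path][dev_info buffer] and point the ctrl header at it */
static void fill_get_info2(struct ublksrv_ctrl_cmd *hdr, char *payload,
                           const char *path, uint32_t dev_id)
{
        uint16_t path_len = strlen(path) + 1;   /* includes the NUL */

        memcpy(payload, path, path_len);        /* dev path comes first */

        hdr->dev_id = dev_id;
        hdr->addr = (uintptr_t)payload;
        hdr->dev_path_len = path_len;
        /* the driver consumes dev_path_len bytes, then uses the remainder */
        hdr->len = path_len + sizeof(struct ublksrv_ctrl_dev_info);
}
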
 
@@ -1990,6 +2215,8 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
                unsigned int issue_flags)
 {
        struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+       struct ublk_device *ub = NULL;
+       u32 cmd_op = cmd->cmd_op;
        int ret = -EINVAL;
 
        if (issue_flags & IO_URING_F_NONBLOCK)
@@ -2000,45 +2227,61 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
        if (!(issue_flags & IO_URING_F_SQE128))
                goto out;
 
-       ret = -EPERM;
-       if (!capable(CAP_SYS_ADMIN))
+       ret = ublk_check_cmd_op(cmd_op);
+       if (ret)
                goto out;
 
-       ret = -ENODEV;
-       switch (cmd->cmd_op) {
+       if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
+               ret = -ENODEV;
+               ub = ublk_get_device_from_id(header->dev_id);
+               if (!ub)
+                       goto out;
+
+               ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
+               if (ret)
+                       goto put_dev;
+       }
+
+       switch (_IOC_NR(cmd_op)) {
        case UBLK_CMD_START_DEV:
-               ret = ublk_ctrl_start_dev(cmd);
+               ret = ublk_ctrl_start_dev(ub, cmd);
                break;
        case UBLK_CMD_STOP_DEV:
-               ret = ublk_ctrl_stop_dev(cmd);
+               ret = ublk_ctrl_stop_dev(ub);
                break;
        case UBLK_CMD_GET_DEV_INFO:
-               ret = ublk_ctrl_get_dev_info(cmd);
+       case UBLK_CMD_GET_DEV_INFO2:
+               ret = ublk_ctrl_get_dev_info(ub, cmd);
                break;
        case UBLK_CMD_ADD_DEV:
                ret = ublk_ctrl_add_dev(cmd);
                break;
        case UBLK_CMD_DEL_DEV:
-               ret = ublk_ctrl_del_dev(header->dev_id);
+               ret = ublk_ctrl_del_dev(&ub);
                break;
        case UBLK_CMD_GET_QUEUE_AFFINITY:
-               ret = ublk_ctrl_get_queue_affinity(cmd);
+               ret = ublk_ctrl_get_queue_affinity(ub, cmd);
                break;
        case UBLK_CMD_GET_PARAMS:
-               ret = ublk_ctrl_get_params(cmd);
+               ret = ublk_ctrl_get_params(ub, cmd);
                break;
        case UBLK_CMD_SET_PARAMS:
-               ret = ublk_ctrl_set_params(cmd);
+               ret = ublk_ctrl_set_params(ub, cmd);
                break;
        case UBLK_CMD_START_USER_RECOVERY:
-               ret = ublk_ctrl_start_recovery(cmd);
+               ret = ublk_ctrl_start_recovery(ub, cmd);
                break;
        case UBLK_CMD_END_USER_RECOVERY:
-               ret = ublk_ctrl_end_recovery(cmd);
+               ret = ublk_ctrl_end_recovery(ub, cmd);
                break;
        default:
+               ret = -ENOTSUPP;
                break;
        }
+
+ put_dev:
+       if (ub)
+               ublk_put_device(ub);
  out:
        io_uring_cmd_done(cmd, ret, 0);
        pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
@@ -2105,5 +2348,8 @@ static void __exit ublk_exit(void)
 module_init(ublk_init);
 module_exit(ublk_exit);
 
+module_param(ublks_max, int, 0444);
+MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add (default: 64)");
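
For example, "modprobe ublk_drv ublks_max=128" raises the limit at load time; since the parameter is 0444, the active value can be read back from /sys/module/ublk_drv/parameters/ublks_max but not changed on a live system.
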
+
 MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
 MODULE_LICENSE("GPL");