ublk: don't return 0 in case of any failure
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index b9c759cef00e6ec6d3b444b69ff11a6d5b236755..253008b2091d69be99d34376ef1488fc47a5e426 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -53,7 +53,8 @@
                | UBLK_F_NEED_GET_DATA \
                | UBLK_F_USER_RECOVERY \
                | UBLK_F_USER_RECOVERY_REISSUE \
-               | UBLK_F_UNPRIVILEGED_DEV)
+               | UBLK_F_UNPRIVILEGED_DEV \
+               | UBLK_F_CMD_IOCTL_ENCODE)
 
 /* All UBLK_PARAM_TYPE_* should be included here */
 #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
@@ -298,9 +299,7 @@ static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
 
 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
 {
-       if (ubq->flags & UBLK_F_NEED_GET_DATA)
-               return true;
-       return false;
+       return ubq->flags & UBLK_F_NEED_GET_DATA;
 }
 
 static struct ublk_device *ublk_get_device(struct ublk_device *ub)
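
A side note on the helper cleanups in this and the next hunk: returning the masked flag word straight from a bool function is safe because C defines conversion to _Bool as a test against zero, not a truncation, so even a flag bit beyond the return type's width would survive. A minimal standalone sketch (the flag name is hypothetical; the real UBLK_F_* bits all sit in the low word anyway):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_FLAG	(1ULL << 40)	/* hypothetical flag above bit 31 */

/* bool return: conversion is "!= 0" (C11 6.3.1.2), the bit survives */
static bool has_flag_bool(uint64_t flags)
{
	return flags & DEMO_FLAG;
}

/* int return: 1ULL << 40 is narrowed on typical LP64 targets and lost */
static int has_flag_int(uint64_t flags)
{
	return flags & DEMO_FLAG;
}

int main(void)
{
	assert(has_flag_bool(DEMO_FLAG));	/* true */
	assert(has_flag_int(DEMO_FLAG) == 0);	/* silently truncated */
	return 0;
}
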
@@ -349,25 +348,19 @@ static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
 static inline bool ublk_queue_can_use_recovery_reissue(
                struct ublk_queue *ubq)
 {
-       if ((ubq->flags & UBLK_F_USER_RECOVERY) &&
-                       (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE))
-               return true;
-       return false;
+       return (ubq->flags & UBLK_F_USER_RECOVERY) &&
+                       (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
 }
 
 static inline bool ublk_queue_can_use_recovery(
                struct ublk_queue *ubq)
 {
-       if (ubq->flags & UBLK_F_USER_RECOVERY)
-               return true;
-       return false;
+       return ubq->flags & UBLK_F_USER_RECOVERY;
 }
 
 static inline bool ublk_can_use_recovery(struct ublk_device *ub)
 {
-       if (ub->dev_info.flags & UBLK_F_USER_RECOVERY)
-               return true;
-       return false;
+       return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
 }
 
 static void ublk_free_disk(struct gendisk *disk)
@@ -428,10 +421,9 @@ static const struct block_device_operations ub_fops = {
 #define UBLK_MAX_PIN_PAGES     32
 
 struct ublk_map_data {
-       const struct ublk_queue *ubq;
        const struct request *rq;
-       const struct ublk_io *io;
-       unsigned max_bytes;
+       unsigned long   ubuf;
+       unsigned int    len;
 };
 
 struct ublk_io_iter {
@@ -488,18 +480,17 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
        return done;
 }
 
-static inline int ublk_copy_user_pages(struct ublk_map_data *data,
-               bool to_vm)
+static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
 {
        const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
-       const unsigned long start_vm = data->io->addr;
+       const unsigned long start_vm = data->ubuf;
        unsigned int done = 0;
        struct ublk_io_iter iter = {
                .pg_off = start_vm & (PAGE_SIZE - 1),
                .bio    = data->rq->bio,
                .iter   = data->rq->bio->bi_iter,
        };
-       const unsigned int nr_pages = round_up(data->max_bytes +
+       const unsigned int nr_pages = round_up(data->len +
                        (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
 
        while (done < nr_pages) {
@@ -512,42 +503,49 @@ static inline int ublk_copy_user_pages(struct ublk_map_data *data,
                                iter.pages);
                if (iter.nr_pages <= 0)
                        return done == 0 ? iter.nr_pages : done;
-               len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
+               len = ublk_copy_io_pages(&iter, data->len, to_vm);
                for (i = 0; i < iter.nr_pages; i++) {
                        if (to_vm)
                                set_page_dirty(iter.pages[i]);
                        put_page(iter.pages[i]);
                }
-               data->max_bytes -= len;
+               data->len -= len;
                done += iter.nr_pages;
        }
 
        return done;
 }
 
+static inline bool ublk_need_map_req(const struct request *req)
+{
+       return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
+}
+
+static inline bool ublk_need_unmap_req(const struct request *req)
+{
+       return ublk_rq_has_data(req) && req_op(req) == REQ_OP_READ;
+}
+
 static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
                struct ublk_io *io)
 {
        const unsigned int rq_bytes = blk_rq_bytes(req);
+
        /*
         * no zero copy, we delay copy WRITE request data into ublksrv
         * context and the big benefit is that pinning pages in current
         * context is pretty fast, see ublk_copy_user_pages
         */
-       if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH)
-               return rq_bytes;
-
-       if (ublk_rq_has_data(req)) {
+       if (ublk_need_map_req(req)) {
                struct ublk_map_data data = {
-                       .ubq    =       ubq,
                        .rq     =       req,
-                       .io     =       io,
-                       .max_bytes =    rq_bytes,
+                       .ubuf   =       io->addr,
+                       .len    =       rq_bytes,
                };
 
                ublk_copy_user_pages(&data, true);
 
-               return rq_bytes - data.max_bytes;
+               return rq_bytes - data.len;
        }
        return rq_bytes;
 }
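
Two things are worth noting in the hunk above. First, the new ublk_need_map_req()/ublk_need_unmap_req() helpers make the copy direction explicit: only WRITE payloads are copied out to the daemon's buffer before dispatch, and only READ payloads are copied back on completion (FLUSH carries no data, so the old REQ_OP_FLUSH special case disappears). Second, ublk_copy_user_pages() sizes its pinning loop with the classic "length plus sub-page offset, rounded up" formula. A small userspace model of that arithmetic, assuming a 4 KiB page and mirroring the kernel's power-of-two round_up():

#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* power-of-two round_up(), as in include/linux/math.h */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

/* pages spanned by the user buffer [ubuf, ubuf + len) */
static unsigned int nr_spanned_pages(unsigned long ubuf, unsigned int len)
{
	return round_up(len + (ubuf & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
}

int main(void)
{
	assert(nr_spanned_pages(0x10000, 4096) == 1);	/* aligned: one page */
	assert(nr_spanned_pages(0x10010, 4096) == 2);	/* unaligned: spills over */
	assert(nr_spanned_pages(0x10fa0, 200) == 2);	/* short, but crosses a boundary */
	return 0;
}
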
@@ -558,19 +556,18 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
 {
        const unsigned int rq_bytes = blk_rq_bytes(req);
 
-       if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
+       if (ublk_need_unmap_req(req)) {
                struct ublk_map_data data = {
-                       .ubq    =       ubq,
                        .rq     =       req,
-                       .io     =       io,
-                       .max_bytes =    io->res,
+                       .ubuf   =       io->addr,
+                       .len    =       io->res,
                };
 
                WARN_ON_ONCE(io->res > rq_bytes);
 
                ublk_copy_user_pages(&data, false);
 
-               return io->res - data.max_bytes;
+               return io->res - data.len;
        }
        return rq_bytes;
 }
@@ -655,14 +652,15 @@ static void ublk_complete_rq(struct request *req)
        struct ublk_queue *ubq = req->mq_hctx->driver_data;
        struct ublk_io *io = &ubq->ios[req->tag];
        unsigned int unmapped_bytes;
+       blk_status_t res = BLK_STS_OK;
 
        /* failed read IO if nothing is read */
        if (!io->res && req_op(req) == REQ_OP_READ)
                io->res = -EIO;
 
        if (io->res < 0) {
-               blk_mq_end_request(req, errno_to_blk_status(io->res));
-               return;
+               res = errno_to_blk_status(io->res);
+               goto exit;
        }
 
        /*
@@ -671,10 +669,8 @@ static void ublk_complete_rq(struct request *req)
         *
         * Neither of the two needs unmapping.
         */
-       if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE) {
-               blk_mq_end_request(req, BLK_STS_OK);
-               return;
-       }
+       if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE)
+               goto exit;
 
        /* for READ request, writing data in iod->addr to rq buffers */
        unmapped_bytes = ublk_unmap_io(ubq, req, io);
@@ -691,6 +687,10 @@ static void ublk_complete_rq(struct request *req)
                blk_mq_requeue_request(req, true);
        else
                __blk_mq_end_request(req, BLK_STS_OK);
+
+       return;
+exit:
+       blk_mq_end_request(req, res);
 }
 
 /*
@@ -769,9 +769,7 @@ static inline void __ublk_rq_task_work(struct request *req)
                return;
        }
 
-       if (ublk_need_get_data(ubq) &&
-                       (req_op(req) == REQ_OP_WRITE ||
-                       req_op(req) == REQ_OP_FLUSH)) {
+       if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
                /*
                 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
                 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
@@ -1256,6 +1254,19 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
        ublk_queue_cmd(ubq, req);
 }
 
+static inline int ublk_check_cmd_op(u32 cmd_op)
+{
+       u32 ioc_type = _IOC_TYPE(cmd_op);
+
+       if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
+               return -EOPNOTSUPP;
+
+       if (ioc_type != 'u' && ioc_type != 0)
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 {
        struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
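
Why the dispatch below can switch on _IOC_NR() for both encodings: the legacy ublk opcodes are bare integers below 256, so _IOC_TYPE() of a legacy value is 0 and _IOC_NR() is the opcode itself, while the new encoding wraps the same number with type 'u' through the standard ioctl macros. A standalone illustration (the struct and names are stand-ins for the real uapi definitions):

#include <stdio.h>
#include <linux/ioctl.h>	/* _IOWR(), _IOC_TYPE(), _IOC_NR() */

struct demo_io_cmd { char pad[32]; };	/* stand-in for ublksrv_io_cmd */

#define DEMO_IO_FETCH_REQ	0x20	/* legacy-style bare opcode */
#define DEMO_U_IO_FETCH_REQ	_IOWR('u', DEMO_IO_FETCH_REQ, struct demo_io_cmd)

int main(void)
{
	/* legacy: type 0x0, nr 0x20 */
	printf("legacy:  type=%#x nr=%#x\n",
	       _IOC_TYPE(DEMO_IO_FETCH_REQ), _IOC_NR(DEMO_IO_FETCH_REQ));
	/* encoded: type 'u', same nr 0x20 */
	printf("encoded: type='%c' nr=%#x\n",
	       _IOC_TYPE(DEMO_U_IO_FETCH_REQ), _IOC_NR(DEMO_U_IO_FETCH_REQ));
	return 0;
}

So old binaries keep working for as long as CONFIG_BLKDEV_UBLK_LEGACY_OPCODES accepts type 0, while new binaries get self-describing, collision-resistant opcodes.
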
@@ -1271,9 +1282,6 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
                        __func__, cmd->cmd_op, ub_cmd->q_id, tag,
                        ub_cmd->result);
 
-       if (!(issue_flags & IO_URING_F_SQE128))
-               goto out;
-
        if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
                goto out;
 
@@ -1300,10 +1308,15 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
         * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
         */
        if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
-                       ^ (cmd_op == UBLK_IO_NEED_GET_DATA))
+                       ^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
                goto out;
 
-       switch (cmd_op) {
+       ret = ublk_check_cmd_op(cmd_op);
+       if (ret)
+               goto out;
+
+       ret = -EINVAL;
+       switch (_IOC_NR(cmd_op)) {
        case UBLK_IO_FETCH_REQ:
                /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
                if (ublk_queue_ready(ubq)) {
@@ -1749,6 +1762,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
        if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
                ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
 
+       ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE;
+
        /* We are not ready to support zero copy */
        ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
 
@@ -2105,7 +2120,7 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
                 * know if the specified device is created as unprivileged
                 * mode.
                 */
-               if (cmd->cmd_op != UBLK_CMD_GET_DEV_INFO2)
+               if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
                        return 0;
        }
 
@@ -2131,7 +2146,7 @@ static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
        dev_path[header->dev_path_len] = 0;
 
        ret = -EINVAL;
-       switch (cmd->cmd_op) {
+       switch (_IOC_NR(cmd->cmd_op)) {
        case UBLK_CMD_GET_DEV_INFO:
        case UBLK_CMD_GET_DEV_INFO2:
        case UBLK_CMD_GET_QUEUE_AFFINITY:
@@ -2170,6 +2185,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
 {
        struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
        struct ublk_device *ub = NULL;
+       u32 cmd_op = cmd->cmd_op;
        int ret = -EINVAL;
 
        if (issue_flags & IO_URING_F_NONBLOCK)
@@ -2180,22 +2196,22 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
        if (!(issue_flags & IO_URING_F_SQE128))
                goto out;
 
-       if (cmd->cmd_op != UBLK_CMD_ADD_DEV) {
+       ret = ublk_check_cmd_op(cmd_op);
+       if (ret)
+               goto out;
+
+       if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
                ret = -ENODEV;
                ub = ublk_get_device_from_id(header->dev_id);
                if (!ub)
                        goto out;
 
                ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
-       } else {
-               /* ADD_DEV permission check is done in command handler */
-               ret = 0;
+               if (ret)
+                       goto put_dev;
        }
 
-       if (ret)
-               goto put_dev;
-
-       switch (cmd->cmd_op) {
+       switch (_IOC_NR(cmd_op)) {
        case UBLK_CMD_START_DEV:
                ret = ublk_ctrl_start_dev(ub, cmd);
                break;
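
For completeness, a minimal userspace sketch of how a control command reaches this path once the feature bit is advertised. It assumes a liburing recent enough to size SQEs for IORING_SETUP_SQE128, plus the UBLK_U_CMD_* and ublksrv_ctrl_cmd definitions from the uapi header accompanying this change; treat the exact names as illustrative rather than authoritative:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>
#include <linux/ublk_cmd.h>

/* issue one ublk control command via the uring_cmd path, return cqe->res */
static int ublk_ctrl_cmd(struct io_uring *ring, int ctrl_fd,
			 unsigned int cmd_op, const struct ublksrv_ctrl_cmd *ctrl)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;			/* SQ ring full */
	memset(sqe, 0, 2 * sizeof(*sqe));	/* an SQE128 slot is 128 bytes */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = ctrl_fd;
	sqe->cmd_op = cmd_op;			/* vetted by ublk_check_cmd_op() */
	memcpy(sqe->cmd, ctrl, sizeof(*ctrl));	/* payload lives inside the big SQE */

	ret = io_uring_submit(ring);
	if (ret != 1)
		return ret < 0 ? ret : -1;
	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	ret = cqe->res;				/* the driver's return value */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}

int main(void)
{
	struct ublksrv_ctrl_dev_info info = { 0 };
	struct ublksrv_ctrl_cmd ctrl = {
		.dev_id   = 0,			/* query ublk device 0 */
		.queue_id = (uint16_t)-1,	/* not a per-queue command */
		.addr     = (uint64_t)(uintptr_t)&info,
		.len      = sizeof(info),
	};
	struct io_uring ring;
	int fd, ret;

	fd = open("/dev/ublk-control", O_RDWR);
	if (fd < 0)
		return 1;
	if (io_uring_queue_init(4, &ring, IORING_SETUP_SQE128))
		return 1;

	/* ioctl-encoded opcode; the bare UBLK_CMD_GET_DEV_INFO still works
	 * while CONFIG_BLKDEV_UBLK_LEGACY_OPCODES is enabled */
	ret = ublk_ctrl_cmd(&ring, fd, UBLK_U_CMD_GET_DEV_INFO, &ctrl);
	if (ret == 0)
		printf("flags=%#llx ioctl-encode=%d\n",
		       (unsigned long long)info.flags,
		       !!(info.flags & UBLK_F_CMD_IOCTL_ENCODE));

	io_uring_queue_exit(&ring);
	close(fd);
	return ret ? 1 : 0;
}
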