// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Userspace block device - block device whose IO is handled from userspace
 *
 * Makes full use of io_uring passthrough commands for communicating with
 * the ublk userspace daemon (ublksrvd) for handling basic IO requests.
 *
 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
 *
 * (part of code stolen from loop.c)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/io_uring.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <linux/task_work.h>
#include <linux/namei.h>
#include <uapi/linux/ublk_cmd.h>

#define UBLK_MINORS	(1U << MINORBITS)

/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
		| UBLK_F_URING_CMD_COMP_IN_TASK \
		| UBLK_F_NEED_GET_DATA \
		| UBLK_F_USER_RECOVERY \
		| UBLK_F_USER_RECOVERY_REISSUE \
		| UBLK_F_UNPRIVILEGED_DEV)

/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
		UBLK_PARAM_TYPE_DISCARD | UBLK_PARAM_TYPE_DEVT)

struct ublk_rq_data {
	struct llist_node node;
	struct callback_head work;
};

struct ublk_uring_cmd_pdu {
	struct ublk_queue *ubq;
};

/*
 * io command is active: sqe cmd is received, and its cqe isn't done
 *
 * If the flag is set, the io command is owned by the ublk driver, and is
 * waiting for an incoming blk-mq request from the ublk block device.
 *
 * If the flag is cleared, the io command will be completed, and owned by
 * the ublk server.
 */
#define UBLK_IO_FLAG_ACTIVE	0x01

/*
 * IO command is completed via cqe, and it is being handled by ublksrv, and
 * not committed yet
 *
 * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used
 * for cross verification
 */
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02

/*
 * IO command is aborted, so this flag is set in case of
 * !UBLK_IO_FLAG_ACTIVE.
 *
 * After this flag is observed, any pending or new incoming request
 * associated with this io command will be failed immediately
 */
#define UBLK_IO_FLAG_ABORTED 0x04

/*
 * UBLK_IO_FLAG_NEED_GET_DATA is set because the IO command requires the
 * data buffer address to be fetched from ublksrv.
 *
 * Then, bio data can be copied into this data buffer for a WRITE request
 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is
 * unset.
 */
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08

71f28f31
ML
109struct ublk_io {
110 /* userspace buffer address from io cmd */
111 __u64 addr;
112 unsigned int flags;
113 int res;
114
115 struct io_uring_cmd *cmd;
116};
117
118struct ublk_queue {
119 int q_id;
120 int q_depth;
121
0edb3696 122 unsigned long flags;
71f28f31
ML
123 struct task_struct *ubq_daemon;
124 char *io_cmd_buf;
125
3ab6e94c
ML
126 struct llist_head io_cmds;
127
71f28f31
ML
128 unsigned long io_addr; /* mapped vm address */
129 unsigned int max_io_sz;
bbae8d1f 130 bool force_abort;
71f28f31
ML
131 unsigned short nr_io_ready; /* how many ios setup */
132 struct ublk_device *dev;
72495b5a 133 struct ublk_io ios[];
71f28f31
ML
134};
135
136#define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ)
137
struct ublk_device {
	struct gendisk		*ub_disk;

	char	*__queues;

	unsigned int	queue_size;
	struct ublksrv_ctrl_dev_info	dev_info;

	struct blk_mq_tag_set	tag_set;

	struct cdev		cdev;
	struct device		cdev_dev;

#define UB_STATE_OPEN		0
#define UB_STATE_USED		1
#define UB_STATE_DELETED	2
	unsigned long		state;
	int			ub_number;

	struct mutex		mutex;

	spinlock_t		mm_lock;
	struct mm_struct	*mm;

	struct ublk_params	params;

	struct completion	completion;
	unsigned int		nr_queues_ready;
	unsigned int		nr_privileged_daemon;

	/*
	 * Our ubq->ubq_daemon may be killed without any notification, so
	 * monitor each queue's daemon periodically
	 */
	struct delayed_work	monitor_work;
	struct work_struct	quiesce_work;
	struct work_struct	stop_work;
};

/* header of ublk_params */
struct ublk_params_header {
	__u32	len;
	__u32	types;
};

static dev_t ublk_chr_devt;
static struct class *ublk_chr_class;

static DEFINE_IDR(ublk_index_idr);
static DEFINE_SPINLOCK(ublk_idr_lock);
static wait_queue_head_t ublk_idr_wq;	/* wait until one idr is freed */

static DEFINE_MUTEX(ublk_ctl_mutex);

/*
 * Max number of ublk devices allowed to add
 *
 * It can be extended to a per-user limit in the future, or even
 * controlled by cgroup.
 */
static unsigned int ublks_max = 64;
static unsigned int ublks_added;	/* protected by ublk_ctl_mutex */

static struct miscdevice ublk_misc;

static void ublk_dev_param_basic_apply(struct ublk_device *ub)
{
	struct request_queue *q = ub->ub_disk->queue;
	const struct ublk_param_basic *p = &ub->params.basic;

	blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
	blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
	blk_queue_io_min(q, 1 << p->io_min_shift);
	blk_queue_io_opt(q, 1 << p->io_opt_shift);

	blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
			p->attrs & UBLK_ATTR_FUA);
	if (p->attrs & UBLK_ATTR_ROTATIONAL)
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

	blk_queue_max_hw_sectors(q, p->max_sectors);
	blk_queue_chunk_sectors(q, p->chunk_sectors);
	blk_queue_virt_boundary(q, p->virt_boundary_mask);

	if (p->attrs & UBLK_ATTR_READ_ONLY)
		set_disk_ro(ub->ub_disk, true);

	set_capacity(ub->ub_disk, p->dev_sectors);
}

static void ublk_dev_param_discard_apply(struct ublk_device *ub)
{
	struct request_queue *q = ub->ub_disk->queue;
	const struct ublk_param_discard *p = &ub->params.discard;

	q->limits.discard_alignment = p->discard_alignment;
	q->limits.discard_granularity = p->discard_granularity;
	blk_queue_max_discard_sectors(q, p->max_discard_sectors);
	blk_queue_max_write_zeroes_sectors(q,
			p->max_write_zeroes_sectors);
	blk_queue_max_discard_segments(q, p->max_discard_segments);
}

static int ublk_validate_params(const struct ublk_device *ub)
{
	/* basic param is the only one which must be set */
	if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
		const struct ublk_param_basic *p = &ub->params.basic;

		if (p->logical_bs_shift > PAGE_SHIFT)
			return -EINVAL;

		if (p->logical_bs_shift > p->physical_bs_shift)
			return -EINVAL;

		if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
			return -EINVAL;
	} else
		return -EINVAL;

	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
		const struct ublk_param_discard *p = &ub->params.discard;

		/* So far, only support single segment discard */
		if (p->max_discard_sectors && p->max_discard_segments != 1)
			return -EINVAL;

		if (!p->discard_granularity)
			return -EINVAL;
	}

	/* dev_t is read-only */
	if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
		return -EINVAL;

	return 0;
}

static int ublk_apply_params(struct ublk_device *ub)
{
	if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
		return -EINVAL;

	ublk_dev_param_basic_apply(ub);

	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
		ublk_dev_param_discard_apply(ub);

	return 0;
}

static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
{
	if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
			!(ubq->flags & UBLK_F_URING_CMD_COMP_IN_TASK))
		return true;
	return false;
}

static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
{
	if (ubq->flags & UBLK_F_NEED_GET_DATA)
		return true;
	return false;
}

static struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
	if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
		return ub;
	return NULL;
}

static void ublk_put_device(struct ublk_device *ub)
{
	put_device(&ub->cdev_dev);
}

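/*
 * Queues live in the flat ->__queues buffer; each queue occupies
 * ->queue_size bytes (struct ublk_queue plus its trailing ios[] array,
 * one ublk_io per tag), so queue qid starts at qid * ->queue_size.
 */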
static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
		int qid)
{
	return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
}

static inline bool ublk_rq_has_data(const struct request *rq)
{
	return bio_has_data(rq->bio);
}

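/*
 * The per-queue io_cmd_buf holds one ublksrv_io_desc per tag; this is the
 * same buffer that ublk_ch_mmap() maps read-only into the daemon, so
 * writing a descriptor here publishes it to userspace.
 */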
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
		int tag)
{
	return (struct ublksrv_io_desc *)
		&(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
}

static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
	return ublk_get_queue(ub, q_id)->io_cmd_buf;
}

static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
{
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);

	return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
			PAGE_SIZE);
}

static inline bool ublk_queue_can_use_recovery_reissue(
		struct ublk_queue *ubq)
{
	if ((ubq->flags & UBLK_F_USER_RECOVERY) &&
			(ubq->flags & UBLK_F_USER_RECOVERY_REISSUE))
		return true;
	return false;
}

static inline bool ublk_queue_can_use_recovery(
		struct ublk_queue *ubq)
{
	if (ubq->flags & UBLK_F_USER_RECOVERY)
		return true;
	return false;
}

static inline bool ublk_can_use_recovery(struct ublk_device *ub)
{
	if (ub->dev_info.flags & UBLK_F_USER_RECOVERY)
		return true;
	return false;
}

static void ublk_free_disk(struct gendisk *disk)
{
	struct ublk_device *ub = disk->private_data;

	clear_bit(UB_STATE_USED, &ub->state);
	put_device(&ub->cdev_dev);
}

static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
		unsigned int *owner_gid)
{
	kuid_t uid;
	kgid_t gid;

	current_uid_gid(&uid, &gid);

	*owner_uid = from_kuid(&init_user_ns, uid);
	*owner_gid = from_kgid(&init_user_ns, gid);
}

static int ublk_open(struct block_device *bdev, fmode_t mode)
{
	struct ublk_device *ub = bdev->bd_disk->private_data;

	if (capable(CAP_SYS_ADMIN))
		return 0;

	/*
	 * If this is an unprivileged device, only the owner may open
	 * the disk. Otherwise it could be a trap set by a malicious
	 * user who deliberately grants this disk's privileges to other
	 * users.
	 *
	 * This is reasonable too, given that anyone can create an
	 * unprivileged device without needing anyone else's grant.
	 */
	if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
		unsigned int curr_uid, curr_gid;

		ublk_store_owner_uid_gid(&curr_uid, &curr_gid);

		if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
				ub->dev_info.owner_gid)
			return -EPERM;
	}

	return 0;
}

static const struct block_device_operations ub_fops = {
	.owner =	THIS_MODULE,
	.open =		ublk_open,
	.free_disk =	ublk_free_disk,
};

#define UBLK_MAX_PIN_PAGES	32

struct ublk_map_data {
	const struct ublk_queue *ubq;
	const struct request *rq;
	const struct ublk_io *io;
	unsigned max_bytes;
};

struct ublk_io_iter {
	struct page *pages[UBLK_MAX_PIN_PAGES];
	unsigned pg_off;	/* offset in the 1st page in pages */
	int nr_pages;		/* how many page pointers in pages */
	struct bio *bio;
	struct bvec_iter iter;
};

static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
		unsigned max_bytes, bool to_vm)
{
	const unsigned total = min_t(unsigned, max_bytes,
			PAGE_SIZE - data->pg_off +
			((data->nr_pages - 1) << PAGE_SHIFT));
	unsigned done = 0;
	unsigned pg_idx = 0;

	while (done < total) {
		struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
		const unsigned int bytes = min3(bv.bv_len, total - done,
				(unsigned)(PAGE_SIZE - data->pg_off));
		void *bv_buf = bvec_kmap_local(&bv);
		void *pg_buf = kmap_local_page(data->pages[pg_idx]);

		if (to_vm)
			memcpy(pg_buf + data->pg_off, bv_buf, bytes);
		else
			memcpy(bv_buf, pg_buf + data->pg_off, bytes);

		kunmap_local(pg_buf);
		kunmap_local(bv_buf);

		/* advance page array */
		data->pg_off += bytes;
		if (data->pg_off == PAGE_SIZE) {
			pg_idx += 1;
			data->pg_off = 0;
		}

		done += bytes;

		/* advance bio */
		bio_advance_iter_single(data->bio, &data->iter, bytes);
		if (!data->iter.bi_size) {
			data->bio = data->bio->bi_next;
			if (data->bio == NULL)
				break;
			data->iter = data->bio->bi_iter;
		}
	}

	return done;
}

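/*
 * Pin the daemon's buffer at data->io->addr in batches of at most
 * UBLK_MAX_PIN_PAGES pages via get_user_pages_fast(), copy through each
 * batch with ublk_copy_io_pages(), then dirty (when writing into the
 * buffer) and release the pages. Returns the number of pages processed;
 * the residual byte count is left in data->max_bytes.
 */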
static inline int ublk_copy_user_pages(struct ublk_map_data *data,
		bool to_vm)
{
	const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
	const unsigned long start_vm = data->io->addr;
	unsigned int done = 0;
	struct ublk_io_iter iter = {
		.pg_off	= start_vm & (PAGE_SIZE - 1),
		.bio	= data->rq->bio,
		.iter	= data->rq->bio->bi_iter,
	};
	const unsigned int nr_pages = round_up(data->max_bytes +
			(start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;

	while (done < nr_pages) {
		const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
				nr_pages - done);
		unsigned i, len;

		iter.nr_pages = get_user_pages_fast(start_vm +
				(done << PAGE_SHIFT), to_pin, gup_flags,
				iter.pages);
		if (iter.nr_pages <= 0)
			return done == 0 ? iter.nr_pages : done;
		len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
		for (i = 0; i < iter.nr_pages; i++) {
			if (to_vm)
				set_page_dirty(iter.pages[i]);
			put_page(iter.pages[i]);
		}
		data->max_bytes -= len;
		done += iter.nr_pages;
	}

	return done;
}

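/*
 * Called just before the io cmd is completed to the daemon: for WRITE
 * requests the bio payload is copied into the daemon's per-io buffer
 * (io->addr); READs need no copy here since data flows back later via
 * ublk_unmap_io(). Returns the byte count the daemon should handle.
 */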
static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
		struct ublk_io *io)
{
	const unsigned int rq_bytes = blk_rq_bytes(req);
	/*
	 * no zero copy, we delay copying WRITE request data into the ublksrv
	 * context and the big benefit is that pinning pages in the current
	 * context is pretty fast, see ublk_copy_user_pages
	 */
	if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH)
		return rq_bytes;

	if (ublk_rq_has_data(req)) {
		struct ublk_map_data data = {
			.ubq	=	ubq,
			.rq	=	req,
			.io	=	io,
			.max_bytes =	rq_bytes,
		};

		ublk_copy_user_pages(&data, true);

		return rq_bytes - data.max_bytes;
	}
	return rq_bytes;
}

static int ublk_unmap_io(const struct ublk_queue *ubq,
		const struct request *req,
		struct ublk_io *io)
{
	const unsigned int rq_bytes = blk_rq_bytes(req);

	if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
		struct ublk_map_data data = {
			.ubq	=	ubq,
			.rq	=	req,
			.io	=	io,
			.max_bytes =	io->res,
		};

		WARN_ON_ONCE(io->res > rq_bytes);

		ublk_copy_user_pages(&data, false);

		return io->res - data.max_bytes;
	}
	return rq_bytes;
}

static inline unsigned int ublk_req_build_flags(struct request *req)
{
	unsigned flags = 0;

	if (req->cmd_flags & REQ_FAILFAST_DEV)
		flags |= UBLK_IO_F_FAILFAST_DEV;

	if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
		flags |= UBLK_IO_F_FAILFAST_TRANSPORT;

	if (req->cmd_flags & REQ_FAILFAST_DRIVER)
		flags |= UBLK_IO_F_FAILFAST_DRIVER;

	if (req->cmd_flags & REQ_META)
		flags |= UBLK_IO_F_META;

	if (req->cmd_flags & REQ_FUA)
		flags |= UBLK_IO_F_FUA;

	if (req->cmd_flags & REQ_NOUNMAP)
		flags |= UBLK_IO_F_NOUNMAP;

	if (req->cmd_flags & REQ_SWAP)
		flags |= UBLK_IO_F_SWAP;

	return flags;
}

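/*
 * Translate a blk-mq request into the ublksrv_io_desc slot for its tag,
 * converting the REQ_OP_ and REQ_ flags into the UBLK_IO_OP_ and
 * UBLK_IO_F_ values of the driver/daemon ABI that the daemon reads from
 * the mmap'ed descriptor buffer.
 */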
static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
	struct ublk_io *io = &ubq->ios[req->tag];
	u32 ublk_op;

	switch (req_op(req)) {
	case REQ_OP_READ:
		ublk_op = UBLK_IO_OP_READ;
		break;
	case REQ_OP_WRITE:
		ublk_op = UBLK_IO_OP_WRITE;
		break;
	case REQ_OP_FLUSH:
		ublk_op = UBLK_IO_OP_FLUSH;
		break;
	case REQ_OP_DISCARD:
		ublk_op = UBLK_IO_OP_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		ublk_op = UBLK_IO_OP_WRITE_ZEROES;
		break;
	default:
		return BLK_STS_IOERR;
	}

	/* need to translate since kernel may change */
	iod->op_flags = ublk_op | ublk_req_build_flags(req);
	iod->nr_sectors = blk_rq_sectors(req);
	iod->start_sector = blk_rq_pos(req);
	iod->addr = io->addr;

	return BLK_STS_OK;
}

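/*
 * The driver's per-command state is stored inline in the io_uring_cmd's
 * pdu area, so no extra allocation is needed for each uring cmd.
 */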
static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}

static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
	return ubq->ubq_daemon->flags & PF_EXITING;
}

/* todo: handle partial completion */
static void ublk_complete_rq(struct request *req)
{
	struct ublk_queue *ubq = req->mq_hctx->driver_data;
	struct ublk_io *io = &ubq->ios[req->tag];
	unsigned int unmapped_bytes;

	/* failed read IO if nothing is read */
	if (!io->res && req_op(req) == REQ_OP_READ)
		io->res = -EIO;

	if (io->res < 0) {
		blk_mq_end_request(req, errno_to_blk_status(io->res));
		return;
	}

	/*
	 * FLUSH, DISCARD or WRITE_ZEROES usually won't return a valid byte
	 * count, so end them directly.
	 *
	 * None of them needs unmapping.
	 */
	if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE) {
		blk_mq_end_request(req, BLK_STS_OK);
		return;
	}

	/* for READ request, writing data in iod->addr to rq buffers */
	unmapped_bytes = ublk_unmap_io(ubq, req, io);

	/*
	 * Extremely unlikely, since the data was filled in just before.
	 *
	 * Re-read simply for this unlikely case.
	 */
	if (unlikely(unmapped_bytes < io->res))
		io->res = unmapped_bytes;

	if (blk_update_request(req, BLK_STS_OK, io->res))
		blk_mq_requeue_request(req, true);
	else
		__blk_mq_end_request(req, BLK_STS_OK);
}

/*
 * Since __ublk_rq_task_work always fails requests immediately during
 * exiting, __ublk_fail_req() is only called from the abort context during
 * exiting, so no lock is necessary.
 *
 * Also aborting may not be started yet; keep in mind that one failed
 * request may be issued by the block layer again.
 */
static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
		struct request *req)
{
	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);

	if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
		io->flags |= UBLK_IO_FLAG_ABORTED;
		if (ublk_queue_can_use_recovery_reissue(ubq))
			blk_mq_requeue_request(req, false);
		else
			blk_mq_end_request(req, BLK_STS_IOERR);
	}
}

static void ubq_complete_io_cmd(struct ublk_io *io, int res,
		unsigned issue_flags)
{
	/* mark this cmd owned by ublksrv */
	io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;

	/*
	 * clear ACTIVE since we are done with this sqe/cmd slot.
	 * We can only accept an io cmd when ACTIVE is not set.
	 */
	io->flags &= ~UBLK_IO_FLAG_ACTIVE;

	/* tell ublksrv one io request is coming */
	io_uring_cmd_done(io->cmd, res, 0, issue_flags);
}

#define UBLK_REQUEUE_DELAY_MS	3

static inline void __ublk_abort_rq(struct ublk_queue *ubq,
		struct request *rq)
{
	/* We cannot process this rq so just requeue it. */
	if (ublk_queue_can_use_recovery(ubq))
		blk_mq_requeue_request(rq, false);
	else
		blk_mq_end_request(rq, BLK_STS_IOERR);

	mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
}

static inline void __ublk_rq_task_work(struct request *req,
		unsigned issue_flags)
{
	struct ublk_queue *ubq = req->mq_hctx->driver_data;
	int tag = req->tag;
	struct ublk_io *io = &ubq->ios[tag];
	unsigned int mapped_bytes;

	pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
			__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
			ublk_get_iod(ubq, req->tag)->addr);

	/*
	 * Task is exiting if either:
	 *
	 * (1) current != ubq_daemon.
	 * io_uring_cmd_complete_in_task() tries to run task_work
	 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
	 *
	 * (2) current->flags & PF_EXITING.
	 */
	if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
		__ublk_abort_rq(ubq, req);
		return;
	}

	if (ublk_need_get_data(ubq) &&
			(req_op(req) == REQ_OP_WRITE ||
			req_op(req) == REQ_OP_FLUSH)) {
		/*
		 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
		 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
		 * and notify it.
		 */
		if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
			io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
			pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
					__func__, io->cmd->cmd_op, ubq->q_id,
					req->tag, io->flags);
			ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
			return;
		}
		/*
		 * We have handled UBLK_IO_NEED_GET_DATA command,
		 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
		 * do the copy work.
		 */
		io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
		/* update iod->addr because ublksrv may have passed a new io buffer */
		ublk_get_iod(ubq, req->tag)->addr = io->addr;
		pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
				__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
				ublk_get_iod(ubq, req->tag)->addr);
	}

	mapped_bytes = ublk_map_io(ubq, req, io);

	/* partially mapped, update io descriptor */
	if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
		/*
		 * Nothing mapped, retry until we succeed.
		 *
		 * We may never succeed in mapping any bytes here because
		 * of OOM. TODO: reserve one buffer with single page pinned
		 * for providing forward progress guarantee.
		 */
		if (unlikely(!mapped_bytes)) {
			blk_mq_requeue_request(req, false);
			blk_mq_delay_kick_requeue_list(req->q,
					UBLK_REQUEUE_DELAY_MS);
			return;
		}

		ublk_get_iod(ubq, req->tag)->nr_sectors =
			mapped_bytes >> 9;
	}

	ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}

static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
		unsigned issue_flags)
{
	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
	struct ublk_rq_data *data, *tmp;

	io_cmds = llist_reverse_order(io_cmds);
	llist_for_each_entry_safe(data, tmp, io_cmds, node)
		__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
}

static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
{
	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
	struct ublk_rq_data *data, *tmp;

	llist_for_each_entry_safe(data, tmp, io_cmds, node)
		__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
}

static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
	struct ublk_queue *ubq = pdu->ubq;

	ublk_forward_io_cmds(ubq, issue_flags);
}

static void ublk_rq_task_work_fn(struct callback_head *work)
{
	struct ublk_rq_data *data = container_of(work,
			struct ublk_rq_data, work);
	struct request *req = blk_mq_rq_from_pdu(data);
	struct ublk_queue *ubq = req->mq_hctx->driver_data;
	unsigned issue_flags = IO_URING_F_UNLOCKED;

	ublk_forward_io_cmds(ubq, issue_flags);
}

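/*
 * Queue one request on the per-queue lockless list. Only the first entry
 * added to an empty list (llist_add() returning true) schedules the
 * handler in the daemon's context via task_work or
 * io_uring_cmd_complete_in_task(); later entries are batched and drained
 * by ublk_forward_io_cmds().
 */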
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
	struct ublk_io *io;

	if (!llist_add(&data->node, &ubq->io_cmds))
		return;

	io = &ubq->ios[rq->tag];
	/*
	 * If the check passes, we know that this is a re-issued request aborted
	 * previously in monitor_work because the ubq_daemon(cmd's task) is
	 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
	 * because this ioucmd's io_uring context may be freed now if no inflight
	 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
	 *
	 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request
	 * (releasing the tag). Then the request is re-started (allocating the
	 * tag) and we are here. Since releasing/allocating a tag implies
	 * smp_mb(), finding UBLK_IO_FLAG_ABORTED guarantees that here is a
	 * re-issued request aborted previously.
	 */
	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
		ublk_abort_io_cmds(ubq);
	} else if (ublk_can_use_task_work(ubq)) {
		if (task_work_add(ubq->ubq_daemon, &data->work,
					TWA_SIGNAL_NO_IPI))
			ublk_abort_io_cmds(ubq);
	} else {
		struct io_uring_cmd *cmd = io->cmd;
		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);

		pdu->ubq = ubq;
		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
	}
}

static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct ublk_queue *ubq = hctx->driver_data;
	struct request *rq = bd->rq;
	blk_status_t res;

	/* fill iod to slot in io cmd buffer */
	res = ublk_setup_iod(ubq, rq);
	if (unlikely(res != BLK_STS_OK))
		return BLK_STS_IOERR;

	/* With recovery feature enabled, force_abort is set in
	 * ublk_stop_dev() before calling del_gendisk(). We have to
	 * abort all requeued and new rqs here to let del_gendisk()
	 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
	 * since that may cause a UAF on the io_uring ctx.
	 *
	 * Note: force_abort is guaranteed to be seen because it is set
	 * before the request queue is unquiesced.
	 */
	if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
		return BLK_STS_IOERR;

	blk_mq_start_request(bd->rq);

	if (unlikely(ubq_daemon_is_dying(ubq))) {
		__ublk_abort_rq(ubq, rq);
		return BLK_STS_OK;
	}

	ublk_queue_cmd(ubq, rq);

	return BLK_STS_OK;
}

static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
		unsigned int hctx_idx)
{
	struct ublk_device *ub = driver_data;
	struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);

	hctx->driver_data = ubq;
	return 0;
}

static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

	init_task_work(&data->work, ublk_rq_task_work_fn);
	return 0;
}

static const struct blk_mq_ops ublk_mq_ops = {
	.queue_rq       = ublk_queue_rq,
	.init_hctx	= ublk_init_hctx,
	.init_request   = ublk_init_rq,
};

static int ublk_ch_open(struct inode *inode, struct file *filp)
{
	struct ublk_device *ub = container_of(inode->i_cdev,
			struct ublk_device, cdev);

	if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
		return -EBUSY;
	filp->private_data = ub;
	return 0;
}

static int ublk_ch_release(struct inode *inode, struct file *filp)
{
	struct ublk_device *ub = filp->private_data;

	clear_bit(UB_STATE_OPEN, &ub->state);
	return 0;
}

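/*
 * The mmap offset encodes the queue id: queue q_id's descriptor buffer is
 * expected at file offset UBLKSRV_CMD_BUF_OFFSET + q_id * max_sz, where
 * max_sz is the worst-case buffer size for UBLK_MAX_QUEUE_DEPTH
 * descriptors (e.g. q_id 1 maps at UBLKSRV_CMD_BUF_OFFSET + max_sz).
 * VM_WRITE mappings are rejected, so the daemon can read descriptors but
 * not modify them.
 */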
/* map pre-allocated per-queue cmd buffer to ublksrv daemon */
static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ublk_device *ub = filp->private_data;
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
	unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
	int q_id, ret = 0;

	spin_lock(&ub->mm_lock);
	if (!ub->mm)
		ub->mm = current->mm;
	if (current->mm != ub->mm)
		ret = -EINVAL;
	spin_unlock(&ub->mm_lock);

	if (ret)
		return ret;

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;

	end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
	if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
		return -EINVAL;

	q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
	pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
			__func__, q_id, current->pid, vma->vm_start,
			phys_off, (unsigned long)sz);

	if (sz != ublk_queue_cmd_buf_size(ub, q_id))
		return -EINVAL;

	pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}

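/*
 * Apply the daemon's COMMIT result: record the byte count or error in the
 * io slot, hand ownership back to the driver, and complete the backing
 * blk-mq request.
 */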
static void ublk_commit_completion(struct ublk_device *ub,
		struct ublksrv_io_cmd *ub_cmd)
{
	u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
	struct ublk_queue *ubq = ublk_get_queue(ub, qid);
	struct ublk_io *io = &ubq->ios[tag];
	struct request *req;

	/* now this cmd slot is owned by the ublk driver */
	io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
	io->res = ub_cmd->result;

	/* find the io request and complete */
	req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);

	if (req && likely(!blk_should_fake_timeout(req->q)))
		ublk_complete_rq(req);
}

/*
 * When ->ubq_daemon is exiting, either a new request is ended immediately,
 * or any queued io command is drained, so it is safe to abort the queue
 * locklessly
 */
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
{
	int i;

	if (!ublk_get_device(ub))
		return;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
			struct request *rq;

			/*
			 * Either we fail the request or ublk_rq_task_work_fn
			 * will do it
			 */
			rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
			if (rq)
				__ublk_fail_req(ubq, io, rq);
		}
	}
	ublk_put_device(ub);
}

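/*
 * Periodic watchdog: if any queue's daemon has started exiting, kick off
 * the quiesce work (with recovery enabled) or the stop work, and abort
 * that queue so inflight requests make forward progress instead of
 * hanging.
 */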
static void ublk_daemon_monitor_work(struct work_struct *work)
{
	struct ublk_device *ub =
		container_of(work, struct ublk_device, monitor_work.work);
	int i;

	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
		struct ublk_queue *ubq = ublk_get_queue(ub, i);

		if (ubq_daemon_is_dying(ubq)) {
			if (ublk_queue_can_use_recovery(ubq))
				schedule_work(&ub->quiesce_work);
			else
				schedule_work(&ub->stop_work);

			/* abort queue is for making forward progress */
			ublk_abort_queue(ub, ubq);
		}
	}

	/*
	 * We can't schedule monitor work if ub's state is not UBLK_S_DEV_LIVE,
	 * i.e. after ublk_remove() or __ublk_quiesce_dev() is started.
	 *
	 * No need for ub->mutex: monitor work is canceled after the state is
	 * marked as not LIVE, so the new state is observed reliably.
	 */
	if (ub->dev_info.state == UBLK_S_DEV_LIVE)
		schedule_delayed_work(&ub->monitor_work,
				UBLK_DAEMON_MONITOR_PERIOD);
}

static inline bool ublk_queue_ready(struct ublk_queue *ubq)
{
	return ubq->nr_io_ready == ubq->q_depth;
}

static void ublk_cancel_queue(struct ublk_queue *ubq)
{
	int i;

	if (!ublk_queue_ready(ubq))
		return;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		if (io->flags & UBLK_IO_FLAG_ACTIVE)
			io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
						IO_URING_F_UNLOCKED);
	}

	/* all io commands are canceled */
	ubq->nr_io_ready = 0;
}

/* Cancel all pending commands, must be called after del_gendisk() returns */
static void ublk_cancel_dev(struct ublk_device *ub)
{
	int i;

	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_cancel_queue(ublk_get_queue(ub, i));
}

static bool ublk_check_inflight_rq(struct request *rq, void *data)
{
	bool *idle = data;

	if (blk_mq_request_started(rq)) {
		*idle = false;
		return false;
	}
	return true;
}

static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
{
	bool idle;

	WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
	while (true) {
		idle = true;
		blk_mq_tagset_busy_iter(&ub->tag_set,
				ublk_check_inflight_rq, &idle);
		if (idle)
			break;
		msleep(UBLK_REQUEUE_DELAY_MS);
	}
}

static void __ublk_quiesce_dev(struct ublk_device *ub)
{
	pr_devel("%s: quiesce ub: dev_id %d state %s\n",
			__func__, ub->dev_info.dev_id,
			ub->dev_info.state == UBLK_S_DEV_LIVE ?
			"LIVE" : "QUIESCED");
	blk_mq_quiesce_queue(ub->ub_disk->queue);
	ublk_wait_tagset_rqs_idle(ub);
	ub->dev_info.state = UBLK_S_DEV_QUIESCED;
	ublk_cancel_dev(ub);
	/* we are going to release the task_struct of ubq_daemon and reset
	 * ->ubq_daemon to NULL, so in monitor_work checking ubq_daemon
	 * would cause a UAF. Besides, monitor_work is not necessary in the
	 * QUIESCED state since we have already scheduled quiesce_work and
	 * quiesced all ubqs.
	 *
	 * Do not let monitor_work schedule itself if the state is QUIESCED.
	 * Cancel it here and re-schedule it in END_USER_RECOVERY to avoid
	 * the UAF.
	 */
	cancel_delayed_work_sync(&ub->monitor_work);
}

static void ublk_quiesce_work_fn(struct work_struct *work)
{
	struct ublk_device *ub =
		container_of(work, struct ublk_device, quiesce_work);

	mutex_lock(&ub->mutex);
	if (ub->dev_info.state != UBLK_S_DEV_LIVE)
		goto unlock;
	__ublk_quiesce_dev(ub);
 unlock:
	mutex_unlock(&ub->mutex);
}

static void ublk_unquiesce_dev(struct ublk_device *ub)
{
	int i;

	pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
			__func__, ub->dev_info.dev_id,
			ub->dev_info.state == UBLK_S_DEV_LIVE ?
			"LIVE" : "QUIESCED");
	/* quiesce_work has run. We let requeued rqs be aborted
	 * before running fallback_wq. "force_abort" must be seen
	 * after the request queue is unquiesced. Then del_gendisk()
	 * can move on.
	 */
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_get_queue(ub, i)->force_abort = true;

	blk_mq_unquiesce_queue(ub->ub_disk->queue);
	/* We may have requeued some rqs in ublk_quiesce_queue() */
	blk_mq_kick_requeue_list(ub->ub_disk->queue);
}

static void ublk_stop_dev(struct ublk_device *ub)
{
	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_DEAD)
		goto unlock;
	if (ublk_can_use_recovery(ub)) {
		if (ub->dev_info.state == UBLK_S_DEV_LIVE)
			__ublk_quiesce_dev(ub);
		ublk_unquiesce_dev(ub);
	}
	del_gendisk(ub->ub_disk);
	ub->dev_info.state = UBLK_S_DEV_DEAD;
	ub->dev_info.ublksrv_pid = -1;
	put_disk(ub->ub_disk);
	ub->ub_disk = NULL;
 unlock:
	ublk_cancel_dev(ub);
	mutex_unlock(&ub->mutex);
	cancel_delayed_work_sync(&ub->monitor_work);
}

/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
{
	mutex_lock(&ub->mutex);
	ubq->nr_io_ready++;
	if (ublk_queue_ready(ubq)) {
		ubq->ubq_daemon = current;
		get_task_struct(ubq->ubq_daemon);
		ub->nr_queues_ready++;

		if (capable(CAP_SYS_ADMIN))
			ub->nr_privileged_daemon++;
	}
	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
		complete_all(&ub->completion);
	mutex_unlock(&ub->mutex);
}

static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
		int tag)
{
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);

	ublk_queue_cmd(ubq, req);
}

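/*
 * Entry point for the daemon's FETCH_REQ / COMMIT_AND_FETCH_REQ /
 * NEED_GET_DATA uring commands on the char device. On success the cmd is
 * parked (-EIOCBQUEUED) as the per-tag io slot until a request arrives;
 * on failure the cmd is completed immediately with the error code.
 */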
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
	struct ublk_device *ub = cmd->file->private_data;
	struct ublk_queue *ubq;
	struct ublk_io *io;
	u32 cmd_op = cmd->cmd_op;
	unsigned tag = ub_cmd->tag;
	int ret = -EINVAL;
	struct request *req;

	pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
			__func__, cmd->cmd_op, ub_cmd->q_id, tag,
			ub_cmd->result);

	if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
		goto out;

	ubq = ublk_get_queue(ub, ub_cmd->q_id);
	if (!ubq || ub_cmd->q_id != ubq->q_id)
		goto out;

	if (ubq->ubq_daemon && ubq->ubq_daemon != current)
		goto out;

	if (tag >= ubq->q_depth)
		goto out;

	io = &ubq->ios[tag];

	/* there is pending io cmd, something must be wrong */
	if (io->flags & UBLK_IO_FLAG_ACTIVE) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * ensure that the user issues UBLK_IO_NEED_GET_DATA
	 * iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
	 */
	if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
			^ (cmd_op == UBLK_IO_NEED_GET_DATA))
		goto out;

	switch (cmd_op) {
	case UBLK_IO_FETCH_REQ:
		/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
		if (ublk_queue_ready(ubq)) {
			ret = -EBUSY;
			goto out;
		}
		/*
		 * The io is being handled by server, so COMMIT_RQ is expected
		 * instead of FETCH_REQ
		 */
		if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
			goto out;
		/* FETCH_RQ has to provide IO buffer if NEED GET DATA is not enabled */
		if (!ub_cmd->addr && !ublk_need_get_data(ubq))
			goto out;
		io->cmd = cmd;
		io->flags |= UBLK_IO_FLAG_ACTIVE;
		io->addr = ub_cmd->addr;

		ublk_mark_io_ready(ub, ubq);
		break;
	case UBLK_IO_COMMIT_AND_FETCH_REQ:
		req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
		/*
		 * COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED GET
		 * DATA is not enabled or it is Read IO.
		 */
		if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
			goto out;
		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
			goto out;
		io->addr = ub_cmd->addr;
		io->flags |= UBLK_IO_FLAG_ACTIVE;
		io->cmd = cmd;
		ublk_commit_completion(ub, ub_cmd);
		break;
	case UBLK_IO_NEED_GET_DATA:
		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
			goto out;
		io->addr = ub_cmd->addr;
		io->cmd = cmd;
		io->flags |= UBLK_IO_FLAG_ACTIVE;
		ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
		break;
	default:
		goto out;
	}
	return -EIOCBQUEUED;

 out:
	io_uring_cmd_done(cmd, ret, 0, issue_flags);
	pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
			__func__, cmd_op, tag, ret, io->flags);
	return -EIOCBQUEUED;
}

static const struct file_operations ublk_ch_fops = {
	.owner = THIS_MODULE,
	.open = ublk_ch_open,
	.release = ublk_ch_release,
	.llseek = no_llseek,
	.uring_cmd = ublk_ch_uring_cmd,
	.mmap = ublk_ch_mmap,
};

static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
	int size = ublk_queue_cmd_buf_size(ub, q_id);
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);

	if (ubq->ubq_daemon)
		put_task_struct(ubq->ubq_daemon);
	if (ubq->io_cmd_buf)
		free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
}

static int ublk_init_queue(struct ublk_device *ub, int q_id)
{
	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
	void *ptr;
	int size;

	ubq->flags = ub->dev_info.flags;
	ubq->q_id = q_id;
	ubq->q_depth = ub->dev_info.queue_depth;
	size = ublk_queue_cmd_buf_size(ub, q_id);

	ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
	if (!ptr)
		return -ENOMEM;

	ubq->io_cmd_buf = ptr;
	ubq->dev = ub;
	return 0;
}

static void ublk_deinit_queues(struct ublk_device *ub)
{
	int nr_queues = ub->dev_info.nr_hw_queues;
	int i;

	if (!ub->__queues)
		return;

	for (i = 0; i < nr_queues; i++)
		ublk_deinit_queue(ub, i);
	kfree(ub->__queues);
}

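/*
 * All queues are allocated as one kcalloc() array of nr_queues elements,
 * each ubq_size bytes: the ublk_queue header followed by q_depth ublk_io
 * slots. ublk_get_queue() indexes into this array by the same stride.
 */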
static int ublk_init_queues(struct ublk_device *ub)
{
	int nr_queues = ub->dev_info.nr_hw_queues;
	int depth = ub->dev_info.queue_depth;
	int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
	int i, ret = -ENOMEM;

	ub->queue_size = ubq_size;
	ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
	if (!ub->__queues)
		return ret;

	for (i = 0; i < nr_queues; i++) {
		if (ublk_init_queue(ub, i))
			goto fail;
	}

	init_completion(&ub->completion);
	return 0;

 fail:
	ublk_deinit_queues(ub);
	return ret;
}

static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
{
	int i = idx;
	int err;

	spin_lock(&ublk_idr_lock);
	/* allocate id, if @id >= 0, we're requesting that specific id */
	if (i >= 0) {
		err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
	}
	spin_unlock(&ublk_idr_lock);

	if (err >= 0)
		ub->ub_number = err;

	return err;
}

static void ublk_free_dev_number(struct ublk_device *ub)
{
	spin_lock(&ublk_idr_lock);
	idr_remove(&ublk_index_idr, ub->ub_number);
	wake_up_all(&ublk_idr_wq);
	spin_unlock(&ublk_idr_lock);
}

static void ublk_cdev_rel(struct device *dev)
{
	struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);

	blk_mq_free_tag_set(&ub->tag_set);
	ublk_deinit_queues(ub);
	ublk_free_dev_number(ub);
	mutex_destroy(&ub->mutex);
	kfree(ub);
}

static int ublk_add_chdev(struct ublk_device *ub)
{
	struct device *dev = &ub->cdev_dev;
	int minor = ub->ub_number;
	int ret;

	dev->parent = ublk_misc.this_device;
	dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
	dev->class = ublk_chr_class;
	dev->release = ublk_cdev_rel;
	device_initialize(dev);

	ret = dev_set_name(dev, "ublkc%d", minor);
	if (ret)
		goto fail;

	cdev_init(&ub->cdev, &ublk_ch_fops);
	ret = cdev_device_add(&ub->cdev, dev);
	if (ret)
		goto fail;

	ublks_added++;
	return 0;
 fail:
	put_device(dev);
	return ret;
}

static void ublk_stop_work_fn(struct work_struct *work)
{
	struct ublk_device *ub =
		container_of(work, struct ublk_device, stop_work);

	ublk_stop_dev(ub);
}

/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
	unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;

	ub->dev_info.max_io_buf_bytes =
		round_down(max_io_bytes, PAGE_SIZE);
}

static int ublk_add_tag_set(struct ublk_device *ub)
{
	ub->tag_set.ops = &ublk_mq_ops;
	ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
	ub->tag_set.queue_depth = ub->dev_info.queue_depth;
	ub->tag_set.numa_node = NUMA_NO_NODE;
	ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
	ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ub->tag_set.driver_data = ub;
	return blk_mq_alloc_tag_set(&ub->tag_set);
}

static void ublk_remove(struct ublk_device *ub)
{
	ublk_stop_dev(ub);
	cancel_work_sync(&ub->stop_work);
	cancel_work_sync(&ub->quiesce_work);
	cdev_device_del(&ub->cdev, &ub->cdev_dev);
	put_device(&ub->cdev_dev);
	ublks_added--;
}

static struct ublk_device *ublk_get_device_from_id(int idx)
{
	struct ublk_device *ub = NULL;

	if (idx < 0)
		return NULL;

	spin_lock(&ublk_idr_lock);
	ub = idr_find(&ublk_index_idr, idx);
	if (ub)
		ub = ublk_get_device(ub);
	spin_unlock(&ublk_idr_lock);

	return ub;
}

static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	int ublksrv_pid = (int)header->data[0];
	struct gendisk *disk;
	int ret = -EINVAL;

	if (ublksrv_pid <= 0)
		return -EINVAL;

	wait_for_completion_interruptible(&ub->completion);

	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);

	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
	    test_bit(UB_STATE_USED, &ub->state)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
	if (IS_ERR(disk)) {
		ret = PTR_ERR(disk);
		goto out_unlock;
	}
	sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
	disk->fops = &ub_fops;
	disk->private_data = ub;

	ub->dev_info.ublksrv_pid = ublksrv_pid;
	ub->ub_disk = disk;

	ret = ublk_apply_params(ub);
	if (ret)
		goto out_put_disk;

	/* don't probe partitions if any ubq daemon is untrusted */
	if (ub->nr_privileged_daemon != ub->nr_queues_ready)
		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);

	get_device(&ub->cdev_dev);
	ub->dev_info.state = UBLK_S_DEV_LIVE;
	ret = add_disk(disk);
	if (ret) {
		/*
		 * Has to drop the reference since ->free_disk won't be
		 * called in case of add_disk failure.
		 */
		ub->dev_info.state = UBLK_S_DEV_DEAD;
		ublk_put_device(ub);
		goto out_put_disk;
	}
	set_bit(UB_STATE_USED, &ub->state);
 out_put_disk:
	if (ret)
		put_disk(disk);
 out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}

static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	cpumask_var_t cpumask;
	unsigned long queue;
	unsigned int retlen;
	unsigned int i;
	int ret;

	if (header->len * BITS_PER_BYTE < nr_cpu_ids)
		return -EINVAL;
	if (header->len & (sizeof(unsigned long)-1))
		return -EINVAL;
	if (!header->addr)
		return -EINVAL;

	queue = header->data[0];
	if (queue >= ub->dev_info.nr_hw_queues)
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	for_each_possible_cpu(i) {
		if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
			cpumask_set_cpu(i, cpumask);
	}

	ret = -EFAULT;
	retlen = min_t(unsigned short, header->len, cpumask_size());
	if (copy_to_user(argp, cpumask, retlen))
		goto out_free_cpumask;
	if (retlen != header->len &&
	    clear_user(argp + retlen, header->len - retlen))
		goto out_free_cpumask;

	ret = 0;
 out_free_cpumask:
	free_cpumask_var(cpumask);
	return ret;
}

static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
{
	pr_devel("%s: dev id %d flags %llx\n", __func__,
			info->dev_id, info->flags);
	pr_devel("\t nr_hw_queues %d queue_depth %d\n",
			info->nr_hw_queues, info->queue_depth);
}

static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublksrv_ctrl_dev_info info;
	struct ublk_device *ub;
	int ret = -EINVAL;

	if (header->len < sizeof(info) || !header->addr)
		return -EINVAL;
	if (header->queue_id != (u16)-1) {
		pr_warn("%s: queue_id is wrong %x\n",
				__func__, header->queue_id);
		return -EINVAL;
	}

	if (copy_from_user(&info, argp, sizeof(info)))
		return -EFAULT;

	if (capable(CAP_SYS_ADMIN))
		info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
	else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
		return -EPERM;

	/* the created device is always owned by current user */
	ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);

	if (header->dev_id != info.dev_id) {
		pr_warn("%s: dev id not match %u %u\n",
				__func__, header->dev_id, info.dev_id);
		return -EINVAL;
	}

	ublk_dump_dev_info(&info);

	ret = mutex_lock_killable(&ublk_ctl_mutex);
	if (ret)
		return ret;

	ret = -EACCES;
	if (ublks_added >= ublks_max)
		goto out_unlock;

	ret = -ENOMEM;
	ub = kzalloc(sizeof(*ub), GFP_KERNEL);
	if (!ub)
		goto out_unlock;
	mutex_init(&ub->mutex);
	spin_lock_init(&ub->mm_lock);
	INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
	INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
	INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);

	ret = ublk_alloc_dev_number(ub, header->dev_id);
	if (ret < 0)
		goto out_free_ub;

	memcpy(&ub->dev_info, &info, sizeof(info));

	/* update device id */
	ub->dev_info.dev_id = ub->ub_number;

	/*
	 * 64bit flags will be copied back to userspace as the feature
	 * negotiation result, so have to clear flags which the driver
	 * doesn't support yet, then userspace can get the correct flags
	 * (features) to handle.
	 */
	ub->dev_info.flags &= UBLK_F_ALL;

	if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
		ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;

	/* We are not ready to support zero copy */
	ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;

	ub->dev_info.nr_hw_queues = min_t(unsigned int,
			ub->dev_info.nr_hw_queues, nr_cpu_ids);
	ublk_align_max_io_size(ub);

	ret = ublk_init_queues(ub);
	if (ret)
		goto out_free_dev_number;

	ret = ublk_add_tag_set(ub);
	if (ret)
		goto out_deinit_queues;

	ret = -EFAULT;
	if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
		goto out_free_tag_set;

	/*
	 * Add the char dev so that the ublksrv daemon can be set up.
	 * ublk_add_chdev() will clean up everything if it fails.
	 */
	ret = ublk_add_chdev(ub);
	goto out_unlock;

 out_free_tag_set:
	blk_mq_free_tag_set(&ub->tag_set);
 out_deinit_queues:
	ublk_deinit_queues(ub);
 out_free_dev_number:
	ublk_free_dev_number(ub);
 out_free_ub:
	mutex_destroy(&ub->mutex);
	kfree(ub);
 out_unlock:
	mutex_unlock(&ublk_ctl_mutex);
	return ret;
}

1794
1795static inline bool ublk_idr_freed(int id)
1796{
1797 void *ptr;
1798
1799 spin_lock(&ublk_idr_lock);
1800 ptr = idr_find(&ublk_index_idr, id);
1801 spin_unlock(&ublk_idr_lock);
1802
1803 return ptr == NULL;
1804}
1805
bfbcef03 1806static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
71f28f31 1807{
bfbcef03
ML
1808 struct ublk_device *ub = *p_ub;
1809 int idx = ub->ub_number;
71f28f31
ML
1810 int ret;
1811
1812 ret = mutex_lock_killable(&ublk_ctl_mutex);
1813 if (ret)
1814 return ret;
1815
0abe39de 1816 if (!test_bit(UB_STATE_DELETED, &ub->state)) {
71f28f31 1817 ublk_remove(ub);
0abe39de 1818 set_bit(UB_STATE_DELETED, &ub->state);
71f28f31
ML
1819 }
1820
bfbcef03
ML
1821 /* Mark the reference as consumed */
1822 *p_ub = NULL;
1823 ublk_put_device(ub);
0abe39de 1824 mutex_unlock(&ublk_ctl_mutex);
71f28f31
ML
1825
1826 /*
1827 * Wait until the idr is removed, then it can be reused after
1828 * DEL_DEV command is returned.
0abe39de
ML
1829 *
1830 * If we returns because of user interrupt, future delete command
1831 * may come:
1832 *
1833 * - the device number isn't freed, this device won't or needn't
1834 * be deleted again, since UB_STATE_DELETED is set, and device
1835 * will be released after the last reference is dropped
1836 *
1837 * - the device number is freed already, we will not find this
1838 * device via ublk_get_device_from_id()
71f28f31 1839 */
0abe39de 1840 wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));
71f28f31 1841
0abe39de 1842 return 0;
71f28f31
ML
1843}
1844
71f28f31
ML
1845static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
1846{
1847 struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
1848
1849 pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
1850 __func__, cmd->cmd_op, header->dev_id, header->queue_id,
1851 header->data[0], header->addr, header->len);
1852}
1853
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
	ublk_stop_dev(ub);
	cancel_work_sync(&ub->stop_work);
	cancel_work_sync(&ub->quiesce_work);

	return 0;
}

static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	void __user *argp = (void __user *)(unsigned long)header->addr;

	if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
		return -EINVAL;

	if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
		return -EFAULT;

	return 0;
}

/* TYPE_DEVT is readonly, so fill it up before returning to userspace */
static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
{
	ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
	ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);

	if (ub->ub_disk) {
		ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
		ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
	} else {
		ub->params.devt.disk_major = 0;
		ub->params.devt.disk_minor = 0;
	}
	ub->params.types |= UBLK_PARAM_TYPE_DEVT;
}

static int ublk_ctrl_get_params(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublk_params_header ph;
	int ret;

	if (header->len <= sizeof(ph) || !header->addr)
		return -EINVAL;

	if (copy_from_user(&ph, argp, sizeof(ph)))
		return -EFAULT;

	if (ph.len > header->len || !ph.len)
		return -EINVAL;

	if (ph.len > sizeof(struct ublk_params))
		ph.len = sizeof(struct ublk_params);

	mutex_lock(&ub->mutex);
	ublk_ctrl_fill_params_devt(ub);
	if (copy_to_user(argp, &ub->params, ph.len))
		ret = -EFAULT;
	else
		ret = 0;
	mutex_unlock(&ub->mutex);

	return ret;
}

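/*
 * Length handshake sketch (userspace side, illustrative only): the caller
 * announces how big its struct ublk_params is via the leading
 * ublk_params_header, and the driver copies back at most what it knows
 * about, so old binaries keep working against newer kernels:
 *
 *	struct ublk_params p = { .len = sizeof(p) };
 *
 *	// header->addr = (__u64)(uintptr_t)&p; header->len = sizeof(p);
 *	// after UBLK_CMD_GET_PARAMS, p.types tells which unions are valid
 */
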
static int ublk_ctrl_set_params(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	struct ublk_params_header ph;
	int ret = -EFAULT;

	if (header->len <= sizeof(ph) || !header->addr)
		return -EINVAL;

	if (copy_from_user(&ph, argp, sizeof(ph)))
		return -EFAULT;

	if (ph.len > header->len || !ph.len || !ph.types)
		return -EINVAL;

	if (ph.len > sizeof(struct ublk_params))
		ph.len = sizeof(struct ublk_params);

	/* parameters can only be changed when the device isn't live */
	mutex_lock(&ub->mutex);
	if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
		ret = -EACCES;
	} else if (copy_from_user(&ub->params, argp, ph.len)) {
		ret = -EFAULT;
	} else {
		/* clear all types we don't support yet */
		ub->params.types &= UBLK_PARAM_TYPE_ALL;
		ret = ublk_validate_params(ub);
	}
	mutex_unlock(&ub->mutex);

	return ret;
}

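/*
 * Ordering note (illustrative): because SET_PARAMS returns -EACCES on a
 * live device, the usual daemon sequence is ADD_DEV -> SET_PARAMS ->
 * START_DEV, while GET_PARAMS is allowed at any time.
 */
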
static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
	int i;

	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
	/* All old ioucmds have to be completed */
	WARN_ON_ONCE(ubq->nr_io_ready);
	/* the old daemon is PF_EXITING, put it now */
	put_task_struct(ubq->ubq_daemon);
	/* We have to reset it to NULL, otherwise ub won't accept a new FETCH_REQ */
	ubq->ubq_daemon = NULL;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		/* forget everything now and be ready for a new FETCH_REQ */
		io->flags = 0;
		io->cmd = NULL;
		io->addr = 0;
	}
}

static int ublk_ctrl_start_recovery(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	int ret = -EINVAL;
	int i;

	mutex_lock(&ub->mutex);
	if (!ublk_can_use_recovery(ub))
		goto out_unlock;
	/*
	 * START_RECOVERY is only allowed after:
	 *
	 * (1) UB_STATE_OPEN is not set, which means the dying process has
	 *     exited and the related io_uring ctx is freed, so the file
	 *     struct of /dev/ublkcX is released.
	 *
	 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
	 *     (a) has quiesced the request queue
	 *     (b) has requeued every inflight rq whose io_flags is ACTIVE
	 *     (c) has requeued/aborted every inflight rq whose io_flags is
	 *         NOT ACTIVE
	 *     (d) has completed/canceled all ioucmds owned by the dying
	 *         process
	 */
	if (test_bit(UB_STATE_OPEN, &ub->state) ||
			ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
		ret = -EBUSY;
		goto out_unlock;
	}
	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
	/* set to NULL, otherwise the new ubq_daemon cannot mmap the io_cmd_buf */
	ub->mm = NULL;
	ub->nr_queues_ready = 0;
	ub->nr_privileged_daemon = 0;
	init_completion(&ub->completion);
	ret = 0;
 out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}

static int ublk_ctrl_end_recovery(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	int ublksrv_pid = (int)header->data[0];
	int ret = -EINVAL;

	pr_devel("%s: Waiting for new ubq_daemons (nr: %d) to be ready, dev id %d...\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
	/* wait until the new ubq_daemon has sent all FETCH_REQs */
	wait_for_completion_interruptible(&ub->completion);
	pr_devel("%s: All new ubq_daemons (nr: %d) are ready, dev id %d\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);

	mutex_lock(&ub->mutex);
	if (!ublk_can_use_recovery(ub))
		goto out_unlock;

	if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
		ret = -EBUSY;
		goto out_unlock;
	}
	ub->dev_info.ublksrv_pid = ublksrv_pid;
	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
			__func__, ublksrv_pid, header->dev_id);
	blk_mq_unquiesce_queue(ub->ub_disk->queue);
	pr_devel("%s: queue unquiesced, dev id %d.\n",
			__func__, header->dev_id);
	blk_mq_kick_requeue_list(ub->ub_disk->queue);
	ub->dev_info.state = UBLK_S_DEV_LIVE;
	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
	ret = 0;
 out_unlock:
	mutex_unlock(&ub->mutex);
	return ret;
}

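/*
 * Recovery flow sketch (userspace side, illustrative only), for devices
 * created with UBLK_F_USER_RECOVERY, after the old daemon dies:
 *
 *	1) send UBLK_CMD_START_USER_RECOVERY; this requires that
 *	   /dev/ublkcX is closed and the device is UBLK_S_DEV_QUIESCED
 *	2) start a new daemon: mmap the io_cmd_buf and issue FETCH_REQ on
 *	   every tag of every queue
 *	3) send UBLK_CMD_END_USER_RECOVERY with data[0] = new daemon pid;
 *	   it blocks until all queues are ready, then unquiesces the
 *	   request queue and flips the state back to UBLK_S_DEV_LIVE
 */
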
/*
 * All control commands are sent via /dev/ublk-control, so we have to check
 * the destination device's permission
 */
static int ublk_char_dev_permission(struct ublk_device *ub,
		const char *dev_path, int mask)
{
	int err;
	struct path path;
	struct kstat stat;

	err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
	if (err)
		return err;

	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
	if (err)
		goto exit;

	err = -EPERM;
	if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
		goto exit;

	err = inode_permission(&nop_mnt_idmap,
			d_backing_inode(path.dentry), mask);
exit:
	path_put(&path);
	return err;
}

static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
		struct io_uring_cmd *cmd)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
	void __user *argp = (void __user *)(unsigned long)header->addr;
	char *dev_path = NULL;
	int ret = 0;
	int mask;

	if (!unprivileged) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/*
		 * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
		 * char_dev_path in its payload too, since userspace may not
		 * know whether the specified device was created in
		 * unprivileged mode.
		 */
		if (cmd->cmd_op != UBLK_CMD_GET_DEV_INFO2)
			return 0;
	}

	/*
	 * The user has to provide the char device path for unprivileged ublk
	 *
	 * header->addr always points to the dev path buffer, and
	 * header->dev_path_len records the length of the dev path buffer.
	 */
	if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
		return -EINVAL;

	if (header->len < header->dev_path_len)
		return -EINVAL;

	dev_path = kmalloc(header->dev_path_len + 1, GFP_KERNEL);
	if (!dev_path)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(dev_path, argp, header->dev_path_len))
		goto exit;
	dev_path[header->dev_path_len] = 0;

	ret = -EINVAL;
	switch (cmd->cmd_op) {
	case UBLK_CMD_GET_DEV_INFO:
	case UBLK_CMD_GET_DEV_INFO2:
	case UBLK_CMD_GET_QUEUE_AFFINITY:
	case UBLK_CMD_GET_PARAMS:
		mask = MAY_READ;
		break;
	case UBLK_CMD_START_DEV:
	case UBLK_CMD_STOP_DEV:
	case UBLK_CMD_ADD_DEV:
	case UBLK_CMD_DEL_DEV:
	case UBLK_CMD_SET_PARAMS:
	case UBLK_CMD_START_USER_RECOVERY:
	case UBLK_CMD_END_USER_RECOVERY:
		mask = MAY_READ | MAY_WRITE;
		break;
	default:
		goto exit;
	}

	ret = ublk_char_dev_permission(ub, dev_path, mask);
	if (!ret) {
		header->len -= header->dev_path_len;
		header->addr += header->dev_path_len;
	}
	pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
			__func__, ub->ub_number, cmd->cmd_op,
			ub->dev_info.owner_uid, ub->dev_info.owner_gid,
			dev_path, ret);
exit:
	kfree(dev_path);
	return ret;
}

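/*
 * Unprivileged payload layout sketch (illustrative only): for
 * UBLK_F_UNPRIVILEGED_DEV the dev path is prepended to the command buffer,
 * and on a successful permission check the driver skips past it so the
 * command handlers only ever see the real payload:
 *
 *	header->addr --> | dev path (dev_path_len bytes) | real payload |
 *
 *	// after ublk_char_dev_permission() succeeds:
 *	// header->addr += dev_path_len; header->len -= dev_path_len;
 */
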
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
	struct ublk_device *ub = NULL;
	int ret = -EINVAL;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ublk_ctrl_cmd_dump(cmd);

	if (!(issue_flags & IO_URING_F_SQE128))
		goto out;

	if (cmd->cmd_op != UBLK_CMD_ADD_DEV) {
		ret = -ENODEV;
		ub = ublk_get_device_from_id(header->dev_id);
		if (!ub)
			goto out;

		ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
	} else {
		/* ADD_DEV permission check is done in the command handler */
		ret = 0;
	}

	if (ret)
		goto put_dev;

	switch (cmd->cmd_op) {
	case UBLK_CMD_START_DEV:
		ret = ublk_ctrl_start_dev(ub, cmd);
		break;
	case UBLK_CMD_STOP_DEV:
		ret = ublk_ctrl_stop_dev(ub);
		break;
	case UBLK_CMD_GET_DEV_INFO:
	case UBLK_CMD_GET_DEV_INFO2:
		ret = ublk_ctrl_get_dev_info(ub, cmd);
		break;
	case UBLK_CMD_ADD_DEV:
		ret = ublk_ctrl_add_dev(cmd);
		break;
	case UBLK_CMD_DEL_DEV:
		ret = ublk_ctrl_del_dev(&ub);
		break;
	case UBLK_CMD_GET_QUEUE_AFFINITY:
		ret = ublk_ctrl_get_queue_affinity(ub, cmd);
		break;
	case UBLK_CMD_GET_PARAMS:
		ret = ublk_ctrl_get_params(ub, cmd);
		break;
	case UBLK_CMD_SET_PARAMS:
		ret = ublk_ctrl_set_params(ub, cmd);
		break;
	case UBLK_CMD_START_USER_RECOVERY:
		ret = ublk_ctrl_start_recovery(ub, cmd);
		break;
	case UBLK_CMD_END_USER_RECOVERY:
		ret = ublk_ctrl_end_recovery(ub, cmd);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}

 put_dev:
	if (ub)
		ublk_put_device(ub);
 out:
	io_uring_cmd_done(cmd, ret, 0, issue_flags);
	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
			__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
	return -EIOCBQUEUED;
}

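/*
 * Control command sketch (userspace side, illustrative only; field names
 * per the io_uring and ublk UAPI headers, and "info" is assumed to be a
 * local struct ublksrv_ctrl_dev_info). The ring has to be created with
 * IORING_SETUP_SQE128, since struct ublksrv_ctrl_cmd lives in the big
 * SQE's cmd area — hence the IO_URING_F_SQE128 check above:
 *
 *	struct ublksrv_ctrl_cmd hdr = {
 *		.dev_id = -1,		// let the driver pick a free id
 *		.queue_id = (__u16)-1,	// not a per-queue command
 *		.addr = (__u64)(uintptr_t)&info,
 *		.len = sizeof(info),
 *	};
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = ctrl_fd;		// open("/dev/ublk-control", O_RDWR)
 *	sqe->cmd_op = UBLK_CMD_ADD_DEV;
 *	memcpy(sqe->cmd, &hdr, sizeof(hdr));
 *	io_uring_submit_and_wait(&ring, 1);
 */
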
static const struct file_operations ublk_ctl_fops = {
	.open = nonseekable_open,
	.uring_cmd = ublk_ctrl_uring_cmd,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice ublk_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ublk-control",
	.fops = &ublk_ctl_fops,
};

static int __init ublk_init(void)
{
	int ret;

	init_waitqueue_head(&ublk_idr_wq);

	ret = misc_register(&ublk_misc);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
	if (ret)
		goto unregister_misc;

	ublk_chr_class = class_create(THIS_MODULE, "ublk-char");
	if (IS_ERR(ublk_chr_class)) {
		ret = PTR_ERR(ublk_chr_class);
		goto free_chrdev_region;
	}
	return 0;

free_chrdev_region:
	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
unregister_misc:
	misc_deregister(&ublk_misc);
	return ret;
}

static void __exit ublk_exit(void)
{
	struct ublk_device *ub;
	int id;

	idr_for_each_entry(&ublk_index_idr, ub, id)
		ublk_remove(ub);

	class_destroy(ublk_chr_class);
	misc_deregister(&ublk_misc);

	idr_destroy(&ublk_index_idr);
	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
}

module_init(ublk_init);
module_exit(ublk_exit);

module_param(ublks_max, int, 0444);
MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add (default: 64)");
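/*
 * Usage note (illustrative): the 0444 permission makes ublks_max read-only
 * via sysfs, so a larger limit has to be given at load time, e.g.:
 *
 *	modprobe ublk_drv ublks_max=128
 */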

MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
MODULE_LICENSE("GPL");