io_uring: Pass whole sqe to commands
drivers/nvme/host/ioctl.c (linux-block.git)

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
#include <linux/ptrace.h>       /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
#include <linux/io_uring.h>
#include "nvme.h"

enum {
        NVME_IOCTL_VEC          = (1 << 0),
        NVME_IOCTL_PARTITION    = (1 << 1),
};

static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
                unsigned int flags, fmode_t mode)
{
        u32 effects;

        if (capable(CAP_SYS_ADMIN))
                return true;

        /*
         * Do not allow unprivileged passthrough on partitions, as that allows an
         * escape from the containment of the partition.
         */
        if (flags & NVME_IOCTL_PARTITION)
                return false;

        /*
         * Do not allow unprivileged processes to send vendor specific or fabrics
         * commands as we can't be sure about their effects.
         */
        if (c->common.opcode >= nvme_cmd_vendor_start ||
            c->common.opcode == nvme_fabrics_command)
                return false;

        /*
         * Do not allow unprivileged passthrough of admin commands except
         * for a subset of identify commands that contain information required
         * to form proper I/O commands in userspace and do not expose any
         * potentially sensitive information.
         */
        if (!ns) {
                if (c->common.opcode == nvme_admin_identify) {
                        switch (c->identify.cns) {
                        case NVME_ID_CNS_NS:
                        case NVME_ID_CNS_CS_NS:
                        case NVME_ID_CNS_NS_CS_INDEP:
                        case NVME_ID_CNS_CS_CTRL:
                        case NVME_ID_CNS_CTRL:
                                return true;
                        }
                }
                return false;
        }

        /*
         * Check if the controller provides a Commands Supported and Effects log
         * and marks this command as supported.  If not, reject unprivileged
         * passthrough.
         */
        effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
        if (!(effects & NVME_CMD_EFFECTS_CSUPP))
                return false;

        /*
         * Don't allow passthrough for commands that have intrusive (or unknown)
         * effects.
         */
        if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
                        NVME_CMD_EFFECTS_UUID_SEL |
                        NVME_CMD_EFFECTS_SCOPE_MASK))
                return false;

        /*
         * Only allow I/O commands that transfer data to the controller or that
         * change the logical block contents if the file descriptor is open for
         * writing.
         */
        if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
                return mode & FMODE_WRITE;
        return true;
}

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
        if (in_compat_syscall())
                ptrval = (compat_uptr_t)ptrval;
        return (void __user *)ptrval;
}

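/*
 * Copy user metadata into a kernel buffer and attach it to the request as a
 * bio integrity payload.  For user writes (REQ_OP_DRV_OUT) the buffer is
 * filled from userspace here; for reads it is copied back to userspace by
 * nvme_finish_user_metadata().  Returns the kernel buffer or an ERR_PTR().
 */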
static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
                unsigned len, u32 seed)
{
        struct bio_integrity_payload *bip;
        int ret = -ENOMEM;
        void *buf;
        struct bio *bio = req->bio;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                goto out;

        ret = -EFAULT;
        if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
                goto out_free_meta;

        bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
        if (IS_ERR(bip)) {
                ret = PTR_ERR(bip);
                goto out_free_meta;
        }

        bip->bip_iter.bi_size = len;
        bip->bip_iter.bi_sector = seed;
        ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
                        offset_in_page(buf));
        if (ret != len) {
                ret = -ENOMEM;
                goto out_free_meta;
        }

        req->cmd_flags |= REQ_INTEGRITY;
        return buf;
out_free_meta:
        kfree(buf);
out:
        return ERR_PTR(ret);
}

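/*
 * Copy metadata back to userspace after a successful user read
 * (REQ_OP_DRV_IN) and free the kernel metadata buffer in all cases.
 */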
static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
                void *meta, unsigned len, int ret)
{
        if (!ret && req_op(req) == REQ_OP_DRV_IN &&
            copy_to_user(ubuf, meta, len))
                ret = -EFAULT;
        kfree(meta);
        return ret;
}

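/*
 * Allocate a request for a user passthrough command and mark it as
 * user-submitted (NVME_REQ_USERCMD).
 */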
static struct request *nvme_alloc_user_request(struct request_queue *q,
                struct nvme_command *cmd, blk_opf_t rq_flags,
                blk_mq_req_flags_t blk_flags)
{
        struct request *req;

        req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
        if (IS_ERR(req))
                return req;
        nvme_init_request(req, cmd);
        nvme_req(req)->flags |= NVME_REQ_USERCMD;
        return req;
}

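/*
 * Map the user data (and optional metadata) buffer into the request.  For
 * io_uring commands with IORING_URING_CMD_FIXED the data comes from a
 * pre-registered fixed buffer, otherwise it is mapped from the user address.
 * On failure the request is freed before returning.
 */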
static int nvme_map_user_request(struct request *req, u64 ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
                unsigned int flags)
{
        struct request_queue *q = req->q;
        struct nvme_ns *ns = q->queuedata;
        struct block_device *bdev = ns ? ns->disk->part0 : NULL;
        struct bio *bio = NULL;
        void *meta = NULL;
        int ret;

        if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
                struct iov_iter iter;

                /* fixedbufs is only for non-vectored io */
                if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
                        return -EINVAL;
                ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
                                rq_data_dir(req), &iter, ioucmd);
                if (ret < 0)
                        goto out;
                ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
        } else {
                ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
                                bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
                                0, rq_data_dir(req));
        }

        if (ret)
                goto out;
        bio = req->bio;
        if (bdev)
                bio_set_dev(bio, bdev);

        if (bdev && meta_buffer && meta_len) {
                meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
                                meta_seed);
                if (IS_ERR(meta)) {
                        ret = PTR_ERR(meta);
                        goto out_unmap;
                }
                *metap = meta;
        }

        return ret;

out_unmap:
        if (bio)
                blk_rq_unmap_user(bio);
out:
        blk_mq_free_request(req);
        return ret;
}

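/*
 * Allocate, map and synchronously execute a user passthrough command, then
 * copy back the result and metadata and clean up the request.
 */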
static int nvme_submit_user_cmd(struct request_queue *q,
                struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
                void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
                u64 *result, unsigned timeout, unsigned int flags)
{
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl;
        struct request *req;
        void *meta = NULL;
        struct bio *bio;
        u32 effects;
        int ret;

        req = nvme_alloc_user_request(q, cmd, 0, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->timeout = timeout;
        if (ubuffer && bufflen) {
                ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
                                meta_len, meta_seed, &meta, NULL, flags);
                if (ret)
                        return ret;
        }

        bio = req->bio;
        ctrl = nvme_req(req)->ctrl;

        effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
        ret = nvme_execute_rq(req, false);
        if (result)
                *result = le64_to_cpu(nvme_req(req)->result.u64);
        if (meta)
                ret = nvme_finish_user_metadata(req, meta_buffer, meta,
                                meta_len, ret);
        if (bio)
                blk_rq_unmap_user(bio);
        blk_mq_free_request(req);

        if (effects)
                nvme_passthru_end(ctrl, effects, cmd, ret);

        return ret;
}

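/*
 * Handler for NVME_IOCTL_SUBMIT_IO: convert a struct nvme_user_io into an
 * NVMe read/write/compare command and submit it synchronously.
 */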
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
        struct nvme_user_io io;
        struct nvme_command c;
        unsigned length, meta_len;
        void __user *metadata;

        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
        if (io.flags)
                return -EINVAL;

        switch (io.opcode) {
        case nvme_cmd_write:
        case nvme_cmd_read:
        case nvme_cmd_compare:
                break;
        default:
                return -EINVAL;
        }

        length = (io.nblocks + 1) << ns->lba_shift;

        if ((io.control & NVME_RW_PRINFO_PRACT) &&
            ns->ms == sizeof(struct t10_pi_tuple)) {
                /*
                 * Protection information is stripped/inserted by the
                 * controller.
                 */
                if (nvme_to_user_ptr(io.metadata))
                        return -EINVAL;
                meta_len = 0;
                metadata = NULL;
        } else {
                meta_len = (io.nblocks + 1) * ns->ms;
                metadata = nvme_to_user_ptr(io.metadata);
        }

        if (ns->features & NVME_NS_EXT_LBAS) {
                length += meta_len;
                meta_len = 0;
        } else if (meta_len) {
                if ((io.metadata & 3) || !io.metadata)
                        return -EINVAL;
        }

        memset(&c, 0, sizeof(c));
        c.rw.opcode = io.opcode;
        c.rw.flags = io.flags;
        c.rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.rw.slba = cpu_to_le64(io.slba);
        c.rw.length = cpu_to_le16(io.nblocks);
        c.rw.control = cpu_to_le16(io.control);
        c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
        c.rw.reftag = cpu_to_le32(io.reftag);
        c.rw.apptag = cpu_to_le16(io.apptag);
        c.rw.appmask = cpu_to_le16(io.appmask);

        return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
                        meta_len, lower_32_bits(io.slba), NULL, 0, 0);
}

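/*
 * Reject passthrough commands whose NSID does not match the namespace the
 * file descriptor refers to.  Commands issued without a namespace
 * (ns == NULL) are not checked here.
 */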
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
                struct nvme_ns *ns, __u32 nsid)
{
        if (ns && nsid != ns->head->ns_id) {
                dev_err(ctrl->device,
                        "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
                        current->comm, nsid, ns->head->ns_id);
                return false;
        }

        return true;
}

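/*
 * Handler for NVME_IOCTL_ADMIN_CMD and NVME_IOCTL_IO_CMD: build an NVMe
 * command from struct nvme_passthru_cmd, check it with nvme_cmd_allowed()
 * and submit it synchronously.
 */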
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
                fmode_t mode)
{
        struct nvme_passthru_cmd cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        u64 result;
        int status;

        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;
        if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (!nvme_cmd_allowed(ns, &c, 0, mode))
                return -EACCES;

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
                        cmd.metadata_len, 0, &result, timeout, 0);

        if (status >= 0) {
                if (put_user(result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
                fmode_t mode)
{
        struct nvme_passthru_cmd64 cmd;
        struct nvme_command c;
        unsigned timeout = 0;
        int status;

        if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
                return -EFAULT;
        if (cmd.flags)
                return -EINVAL;
        if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = cmd.opcode;
        c.common.flags = cmd.flags;
        c.common.nsid = cpu_to_le32(cmd.nsid);
        c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
        c.common.cdw10 = cpu_to_le32(cmd.cdw10);
        c.common.cdw11 = cpu_to_le32(cmd.cdw11);
        c.common.cdw12 = cpu_to_le32(cmd.cdw12);
        c.common.cdw13 = cpu_to_le32(cmd.cdw13);
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);

        if (!nvme_cmd_allowed(ns, &c, flags, mode))
                return -EACCES;

        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);

        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
                        cmd.metadata_len, 0, &cmd.result, timeout, flags);

        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
                        return -EFAULT;
        }

        return status;
}

struct nvme_uring_data {
        __u64   metadata;
        __u64   addr;
        __u32   data_len;
        __u32   metadata_len;
        __u32   timeout_ms;
};

/*
 * This overlays struct io_uring_cmd pdu.
 * Expect build errors if this grows larger than that.
 */
struct nvme_uring_cmd_pdu {
        union {
                struct bio *bio;
                struct request *req;
        };
        u32 meta_len;
        u32 nvme_status;
        union {
                struct {
                        void *meta; /* kernel-resident buffer */
                        void __user *meta_buffer;
                };
                u64 result;
        } u;
};

static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
                struct io_uring_cmd *ioucmd)
{
        return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}

static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
                unsigned issue_flags)
{
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        struct request *req = pdu->req;
        int status;
        u64 result;

        if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
                status = -EINTR;
        else
                status = nvme_req(req)->status;

        result = le64_to_cpu(nvme_req(req)->result.u64);

        if (pdu->meta_len)
                status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
                                pdu->u.meta, pdu->meta_len, status);
        if (req->bio)
                blk_rq_unmap_user(req->bio);
        blk_mq_free_request(req);

        io_uring_cmd_done(ioucmd, status, result, issue_flags);
}

static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
                unsigned issue_flags)
{
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);

        if (pdu->bio)
                blk_rq_unmap_user(pdu->bio);

        io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
}

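/*
 * Request completion handlers for uring commands.  The non-metadata variant
 * stashes the NVMe status and result in the pdu and lets the block layer
 * free the request (RQ_END_IO_FREE); the metadata variant keeps the request
 * alive (RQ_END_IO_NONE) so that nvme_uring_task_meta_cb() can copy the
 * metadata back to userspace before freeing it.
 */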
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
                blk_status_t err)
{
        struct io_uring_cmd *ioucmd = req->end_io_data;
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        void *cookie = READ_ONCE(ioucmd->cookie);

        req->bio = pdu->bio;
        if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
                pdu->nvme_status = -EINTR;
        else
                pdu->nvme_status = nvme_req(req)->status;
        pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);

        /*
         * For iopoll, complete it directly.
         * Otherwise, move the completion to task work.
         */
        if (cookie != NULL && blk_rq_is_poll(req))
                nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
        else
                io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);

        return RQ_END_IO_FREE;
}

static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
                blk_status_t err)
{
        struct io_uring_cmd *ioucmd = req->end_io_data;
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        void *cookie = READ_ONCE(ioucmd->cookie);

        req->bio = pdu->bio;
        pdu->req = req;

        /*
         * For iopoll, complete it directly.
         * Otherwise, move the completion to task work.
         */
        if (cookie != NULL && blk_rq_is_poll(req))
                nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
        else
                io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);

        return RQ_END_IO_NONE;
}

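/*
 * Submit an NVMe passthrough command received as an io_uring big SQE.  The
 * SQE payload is read with READ_ONCE() since it lives in memory shared with
 * userspace.  Completion is asynchronous via the end_io handlers above, so
 * -EIOCBQUEUED is returned on successful submission.
 */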
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
        const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
        struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
        struct nvme_uring_data d;
        struct nvme_command c;
        struct request *req;
        blk_opf_t rq_flags = REQ_ALLOC_CACHE;
        blk_mq_req_flags_t blk_flags = 0;
        void *meta = NULL;
        int ret;

        c.common.opcode = READ_ONCE(cmd->opcode);
        c.common.flags = READ_ONCE(cmd->flags);
        if (c.common.flags)
                return -EINVAL;

        c.common.command_id = 0;
        c.common.nsid = cpu_to_le32(cmd->nsid);
        if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
                return -EINVAL;

        c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
        c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
        c.common.metadata = 0;
        c.common.dptr.prp1 = c.common.dptr.prp2 = 0;
        c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
        c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
        c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
        c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
        c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
        c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

        if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode))
                return -EACCES;

        d.metadata = READ_ONCE(cmd->metadata);
        d.addr = READ_ONCE(cmd->addr);
        d.data_len = READ_ONCE(cmd->data_len);
        d.metadata_len = READ_ONCE(cmd->metadata_len);
        d.timeout_ms = READ_ONCE(cmd->timeout_ms);

        if (issue_flags & IO_URING_F_NONBLOCK) {
                rq_flags |= REQ_NOWAIT;
                blk_flags = BLK_MQ_REQ_NOWAIT;
        }
        if (issue_flags & IO_URING_F_IOPOLL)
                rq_flags |= REQ_POLLED;

retry:
        req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;

        if (d.addr && d.data_len) {
                ret = nvme_map_user_request(req, d.addr,
                        d.data_len, nvme_to_user_ptr(d.metadata),
                        d.metadata_len, 0, &meta, ioucmd, vec);
                if (ret)
                        return ret;
        }

        if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
                if (unlikely(!req->bio)) {
                        /* we can't poll this, so alloc regular req instead */
                        blk_mq_free_request(req);
                        rq_flags &= ~REQ_POLLED;
                        goto retry;
                } else {
                        WRITE_ONCE(ioucmd->cookie, req->bio);
                        req->bio->bi_opf |= REQ_POLLED;
                }
        }
        /* to free bio on completion, as req->bio will be null at that time */
        pdu->bio = req->bio;
        pdu->meta_len = d.metadata_len;
        req->end_io_data = ioucmd;
        if (pdu->meta_len) {
                pdu->u.meta = meta;
                pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
                req->end_io = nvme_uring_cmd_end_io_meta;
        } else {
                req->end_io = nvme_uring_cmd_end_io;
        }
        blk_execute_rq_nowait(req, false);
        return -EIOCBQUEUED;
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
        if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
                return true;
        if (is_sed_ioctl(cmd))
                return true;
        return false;
}

static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
                void __user *argp, fmode_t mode)
{
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp, 0, mode);
        case NVME_IOCTL_ADMIN64_CMD:
                return nvme_user_cmd64(ctrl, NULL, argp, 0, mode);
        default:
                return sed_ioctl(ctrl->opal_dev, cmd, argp);
        }
}

#ifdef COMPAT_FOR_U64_ALIGNMENT
struct nvme_user_io32 {
        __u8    opcode;
        __u8    flags;
        __u16   control;
        __u16   nblocks;
        __u16   rsvd;
        __u64   metadata;
        __u64   addr;
        __u64   slba;
        __u32   dsmgmt;
        __u32   reftag;
        __u16   apptag;
        __u16   appmask;
} __attribute__((__packed__));
#define NVME_IOCTL_SUBMIT_IO32  _IOW('N', 0x42, struct nvme_user_io32)
#endif /* COMPAT_FOR_U64_ALIGNMENT */

static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp, unsigned int flags, fmode_t mode)
{
        switch (cmd) {
        case NVME_IOCTL_ID:
                force_successful_syscall_return();
                return ns->head->ns_id;
        case NVME_IOCTL_IO_CMD:
                return nvme_user_cmd(ns->ctrl, ns, argp, flags, mode);
        /*
         * struct nvme_user_io can have different padding on some 32-bit ABIs.
         * Just accept the compat version as all fields that are used are the
         * same size and at the same offset.
         */
#ifdef COMPAT_FOR_U64_ALIGNMENT
        case NVME_IOCTL_SUBMIT_IO32:
#endif
        case NVME_IOCTL_SUBMIT_IO:
                return nvme_submit_io(ns, argp);
        case NVME_IOCTL_IO64_CMD_VEC:
                flags |= NVME_IOCTL_VEC;
                fallthrough;
        case NVME_IOCTL_IO64_CMD:
                return nvme_user_cmd64(ns->ctrl, ns, argp, flags, mode);
        default:
                return -ENOTTY;
        }
}

int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns = bdev->bd_disk->private_data;
        void __user *argp = (void __user *)arg;
        unsigned int flags = 0;

        if (bdev_is_partition(bdev))
                flags |= NVME_IOCTL_PARTITION;

        if (is_ctrl_ioctl(cmd))
                return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
        return nvme_ns_ioctl(ns, cmd, argp, flags, mode);
}

long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct nvme_ns *ns =
                container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
        void __user *argp = (void __user *)arg;

        if (is_ctrl_ioctl(cmd))
                return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, file->f_mode);
        return nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
}

static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
        /* NVMe passthrough requires big SQE/CQE support */
        if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
            (IO_URING_F_SQE128|IO_URING_F_CQE32))
                return -EOPNOTSUPP;
        return 0;
}

static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd,
                unsigned int issue_flags)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        int ret;

        BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));

        ret = nvme_uring_cmd_checks(issue_flags);
        if (ret)
                return ret;

        switch (ioucmd->cmd_op) {
        case NVME_URING_CMD_IO:
                ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false);
                break;
        case NVME_URING_CMD_IO_VEC:
                ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, true);
                break;
        default:
                ret = -ENOTTY;
        }

        return ret;
}

int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
        struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
                        struct nvme_ns, cdev);

        return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}

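/*
 * Poll for completion of a uring passthrough command.  The bio to poll was
 * stashed in ioucmd->cookie at submission time; only poll if the queue still
 * has QUEUE_FLAG_POLL set and the bio is still mapped to a device.
 */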
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
                struct io_comp_batch *iob,
                unsigned int poll_flags)
{
        struct bio *bio;
        int ret = 0;
        struct nvme_ns *ns;
        struct request_queue *q;

        rcu_read_lock();
        bio = READ_ONCE(ioucmd->cookie);
        ns = container_of(file_inode(ioucmd->file)->i_cdev,
                        struct nvme_ns, cdev);
        q = ns->queue;
        if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
                ret = bio_poll(bio, iob, poll_flags);
        rcu_read_unlock();
        return ret;
}
#ifdef CONFIG_NVME_MULTIPATH
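/*
 * Controller ioctls issued through a multipath namespace head.  The ctrl
 * reference is taken and the head SRCU lock dropped before the ioctl runs,
 * hence the __releases() annotation.
 */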
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
                void __user *argp, struct nvme_ns_head *head, int srcu_idx,
                fmode_t mode)
        __releases(&head->srcu)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        int ret;

        nvme_get_ctrl(ns->ctrl);
        srcu_read_unlock(&head->srcu, srcu_idx);
        ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);

        nvme_put_ctrl(ctrl);
        return ret;
}

int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
{
        struct nvme_ns_head *head = bdev->bd_disk->private_data;
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;
        unsigned int flags = 0;

        if (bdev_is_partition(bdev))
                flags |= NVME_IOCTL_PARTITION;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (!ns)
                goto out_unlock;

        /*
         * Handle ioctls that apply to the controller instead of the namespace
         * separately and drop the ns SRCU reference early.  This avoids a
         * deadlock when deleting namespaces using the passthrough interface.
         */
        if (is_ctrl_ioctl(cmd))
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
                                mode);

        ret = nvme_ns_ioctl(ns, cmd, argp, flags, mode);
out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}

long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg)
{
        struct cdev *cdev = file_inode(file)->i_cdev;
        struct nvme_ns_head *head =
                container_of(cdev, struct nvme_ns_head, cdev);
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (!ns)
                goto out_unlock;

        if (is_ctrl_ioctl(cmd))
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
                                file->f_mode);

        ret = nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}

int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
                unsigned int issue_flags)
{
        struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
        struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
        int srcu_idx = srcu_read_lock(&head->srcu);
        struct nvme_ns *ns = nvme_find_path(head);
        int ret = -EINVAL;

        if (ns)
                ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}

int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
                struct io_comp_batch *iob,
                unsigned int poll_flags)
{
        struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
        struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
        int srcu_idx = srcu_read_lock(&head->srcu);
        struct nvme_ns *ns = nvme_find_path(head);
        struct bio *bio;
        int ret = 0;
        struct request_queue *q;

        if (ns) {
                rcu_read_lock();
                bio = READ_ONCE(ioucmd->cookie);
                q = ns->queue;
                if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
                    && bio->bi_bdev)
                        ret = bio_poll(bio, iob, poll_flags);
                rcu_read_unlock();
        }
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
        struct nvme_ctrl *ctrl = ioucmd->file->private_data;
        int ret;

        /* IOPOLL not supported yet */
        if (issue_flags & IO_URING_F_IOPOLL)
                return -EOPNOTSUPP;

        ret = nvme_uring_cmd_checks(issue_flags);
        if (ret)
                return ret;

        switch (ioucmd->cmd_op) {
        case NVME_URING_CMD_ADMIN:
                ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, false);
                break;
        case NVME_URING_CMD_ADMIN_VEC:
                ret = nvme_uring_cmd_io(ctrl, NULL, ioucmd, issue_flags, true);
                break;
        default:
                ret = -ENOTTY;
        }

        return ret;
}

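/*
 * Legacy NVME_IOCTL_IO_CMD on the controller character device: only allowed
 * when the controller has exactly one namespace, which the command is then
 * issued against.
 */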
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
                fmode_t mode)
{
        struct nvme_ns *ns;
        int ret;

        down_read(&ctrl->namespaces_rwsem);
        if (list_empty(&ctrl->namespaces)) {
                ret = -ENOTTY;
                goto out_unlock;
        }

        ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
        if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
                dev_warn(ctrl->device,
                        "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        dev_warn(ctrl->device,
                "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
        kref_get(&ns->kref);
        up_read(&ctrl->namespaces_rwsem);

        ret = nvme_user_cmd(ctrl, ns, argp, 0, mode);
        nvme_put_ns(ns);
        return ret;

out_unlock:
        up_read(&ctrl->namespaces_rwsem);
        return ret;
}

long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                unsigned long arg)
{
        struct nvme_ctrl *ctrl = file->private_data;
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp, 0, file->f_mode);
        case NVME_IOCTL_ADMIN64_CMD:
                return nvme_user_cmd64(ctrl, NULL, argp, 0, file->f_mode);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp, file->f_mode);
        case NVME_IOCTL_RESET:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                dev_warn(ctrl->device, "resetting controller\n");
                return nvme_reset_ctrl_sync(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                return nvme_reset_subsystem(ctrl);
        case NVME_IOCTL_RESCAN:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
                nvme_queue_scan(ctrl);
                return 0;
        default:
                return -ENOTTY;
        }
}