// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC		16
#define NVMET_MIN_MPOOL_OBJ		16

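/*
 * Under memory pressure the per-request bio_vec array falls back to a
 * mempool of NVMET_MAX_MPOOL_BVEC-sized objects, so a request never
 * fails outright for lack of memory; transfers larger than one mempool
 * object are then issued synchronously in NVMET_MAX_MPOOL_BVEC-sized
 * chunks (see nvmet_file_execute_io()).
 */
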
int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
	struct kstat stat;
	int ret;

	ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
			  AT_STATX_FORCE_SYNC);
	if (!ret)
		ns->size = stat.size;
	return ret;
}

void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

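/*
 * Open the backing file and set up the per-namespace bvec cache and
 * mempool.  The file is opened O_DIRECT unless buffered I/O was
 * requested for this namespace (ns->buffered_io).
 */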
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		ret = PTR_ERR(ns->file);
		pr_err("failed to open file %s: (%d)\n",
			ns->device_path, ret);
		ns->file = NULL;
		return ret;
	}

	ret = nvmet_file_ns_revalidate(ns);
	if (ret)
		goto err;

	/*
	 * i_blkbits can be greater than the universally accepted upper bound,
	 * so make sure we export a sane namespace lba_shift.
	 */
	ns->blksize_shift = min_t(u8,
			file_inode(ns->file)->i_blkbits, 12);

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
	bv->bv_page = sg_page(sg);
	bv->bv_offset = sg->offset;
	bv->bv_len = sg->length;
}

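/*
 * Build an iov_iter over the request's bio_vec array and call the
 * backing file's ->read_iter()/->write_iter() directly.  A FUA write
 * is mapped to IOCB_DSYNC so the data is stable before completion.
 */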
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	return call_iter(iocb, &iter);
}

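/*
 * Completion handler: runs as the kiocb ->ki_complete callback for
 * async submissions and is called directly on the synchronous paths.
 * Frees the bvec array (unless the inline one was used) and translates
 * a short transfer into an NVMe status code.
 */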
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
	u16 status = NVME_SC_SUCCESS;

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	if (unlikely(ret != req->transfer_len))
		status = errno_to_nvme_status(req, ret);
	nvmet_req_complete(req, status);
}

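/*
 * Returns true if the request was completed (or queued for async
 * completion) and false if the caller should retry without
 * IOCB_NOWAIT.  When the bvec array came from the mempool and cannot
 * cover the whole transfer, the I/O is issued synchronously in
 * NVMET_MAX_MPOOL_BVEC-sized chunks (is_sync == true).
 */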
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = req->sg_cnt;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;
	int i;
	struct scatterlist *sg;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->transfer_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->transfer_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which we want
	 * for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * For file systems returning error -EOPNOTSUPP, handle
		 * IOCB_NOWAIT error case separately and retry without
		 * IOCB_NOWAIT.
		 */
		if ((ki_flags & IOCB_NOWAIT))
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret, 0);
	return true;
}

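/*
 * Buffered I/O is issued from workqueue context, since the
 * ->read_iter()/->write_iter() call may block on the page cache;
 * buffered_io_wq is created by the target core.
 */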
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

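/*
 * bvec allocation strategy: use the inline array for small transfers,
 * kmalloc_array() for larger ones, and fall back to the namespace
 * mempool if that allocation fails.
 */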
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = req->sg_cnt;

	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
				nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}

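/* Flush maps to a datasync fsync (vfs_fsync datasync == 1) on the backing file. */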
u16 nvmet_file_flush(struct nvmet_req *req)
{
	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

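/*
 * Each DSM deallocate range is translated into an fallocate() punch
 * hole on the backing file; -EOPNOTSUPP from the file system is
 * tolerated so that discard stays advisory.
 */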
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 status = 0;
	int ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (status)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, -ENOSPC);
			break;
		}

		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret && ret != -EOPNOTSUPP) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, ret);
			break;
		}
	}

	nvmet_req_complete(req, status);
}

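/*
 * Only the Deallocate attribute is implemented; the Integrated Read
 * and Integrated Write hints are accepted and completed without action.
 */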
static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

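/*
 * Write Zeroes uses FALLOC_FL_ZERO_RANGE.  Note that the length field
 * in the command is 0's based, hence the "+ 1" below.
 */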
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

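/*
 * I/O command dispatch for file-backed namespaces; anything not
 * handled here is rejected as an invalid opcode.
 */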
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}