2 * nvme structure declarations and helper functions for the
/*
 * Translate an fio io_u into an NVMe io_uring passthrough command:
 * select the read/write opcode, convert the byte offset/length into
 * LBA units via the cached lba_shift, and fill the command dwords,
 * data pointer and namespace id.
 */
8 int fio_nvme_uring_cmd_prep(struct nvme_uring_cmd *cmd, struct io_u *io_u,
11 struct nvme_data *data = FILE_ENG_DATA(io_u->file);
15 memset(cmd, 0, sizeof(struct nvme_uring_cmd));
/* Map fio's data direction onto the NVMe I/O opcode. */
17 if (io_u->ddir == DDIR_READ)
18 cmd->opcode = nvme_cmd_read;
19 else if (io_u->ddir == DDIR_WRITE)
20 cmd->opcode = nvme_cmd_write;
/*
 * Byte offset -> starting LBA.  The "- 1" makes nlb a 0's-based
 * LBA count, matching the NVMe NLB field encoding.
 */
24 slba = io_u->offset >> data->lba_shift;
25 nlb = (io_u->xfer_buflen >> data->lba_shift) - 1;
27 /* cdw10 and cdw11 represent starting lba */
28 cmd->cdw10 = slba & 0xffffffff;
29 cmd->cdw11 = slba >> 32;
30 /* cdw12 represent number of lba's for read/write */
/*
 * Vectored path: describe the transfer with a single iovec and hand
 * its address to the command.  NOTE(review): the branch condition
 * selecting this path is not visible here — presumably an iovec
 * argument being non-NULL; confirm against the full source.
 */
33 iov->iov_base = io_u->xfer_buf;
34 iov->iov_len = io_u->xfer_buflen;
35 cmd->addr = (__u64)(uintptr_t)iov;
/* Non-vectored path: point addr at the data buffer directly. */
38 cmd->addr = (__u64)(uintptr_t)io_u->xfer_buf;
39 cmd->data_len = io_u->xfer_buflen;
41 cmd->nsid = data->nsid;
/*
 * Issue an NVMe Identify admin command for the given CNS/CSI and read
 * NVME_IDENTIFY_DATA_SIZE bytes into @data.  Returns the raw result
 * of the NVME_IOCTL_ADMIN_CMD ioctl (0 on success).
 */
45 static int nvme_identify(int fd, __u32 nsid, enum nvme_identify_cns cns,
46 enum nvme_csi csi, void *data)
48 struct nvme_passthru_cmd cmd = {
49 .opcode = nvme_admin_identify,
51 .addr = (__u64)(uintptr_t)data,
52 .data_len = NVME_IDENTIFY_DATA_SIZE,
/* CSI selects the command-set-specific identify variant (cdw11). */
54 .cdw11 = csi << NVME_IDENTIFY_CSI_SHIFT,
55 .timeout_ms = NVME_DEFAULT_IOCTL_TIMEOUT,
58 return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
/*
 * Probe an NVMe generic char device (/dev/ngXnY) for its namespace id
 * and LBA size: fetch the nsid via NVME_IOCTL_ID, then Identify the
 * namespace and derive the LBA size from the in-use LBA format.
 * Results are returned through @nsid and @lba_sz.
 */
61 int fio_nvme_get_info(struct fio_file *f, __u32 *nsid, __u32 *lba_sz,
/* Only the generic char-device node supports uring passthrough. */
68 if (f->filetype != FIO_TYPE_CHAR) {
69 log_err("ioengine io_uring_cmd only works with nvme ns "
70 "generic char devices (/dev/ngXnY)\n");
74 fd = open(f->file_name, O_RDONLY);
78 namespace_id = ioctl(fd, NVME_IOCTL_ID);
79 if (namespace_id < 0) {
80 log_err("failed to fetch namespace-id");
86 * Identify namespace to get namespace-id, namespace size in LBA's
89 err = nvme_identify(fd, namespace_id, NVME_IDENTIFY_CNS_NS,
92 log_err("failed to fetch identify namespace\n");
/*
 * flbas low nibble indexes the in-use LBA format; ds is the
 * log2 of the data size, so 1 << ds is the LBA size in bytes.
 */
98 *lba_sz = 1 << ns.lbaf[(ns.flbas & 0x0f)].ds;
/*
 * fio zbd hook: report the zoned model of @f.  Verifies the device is
 * an NVMe char node, checks that both the controller and the
 * namespace answer ZNS (CSI) identify commands, and if so reports
 * ZBD_HOST_MANAGED through @model.
 */
105 int fio_nvme_get_zoned_model(struct thread_data *td, struct fio_file *f,
106 enum zbd_zoned_model *model)
108 struct nvme_data *data = FILE_ENG_DATA(f);
109 struct nvme_id_ns ns;
110 struct nvme_passthru_cmd cmd;
/* Non-char devices cannot be ZNS passthrough targets. */
113 if (f->filetype != FIO_TYPE_CHAR)
116 /* File is not yet opened */
117 fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
/* ZNS CSI identify on the controller: fails if ZNS is unsupported. */
121 /* Using nvme_id_ns for data as sizes are same */
122 ret = nvme_identify(fd, data->nsid, NVME_IDENTIFY_CNS_CSI_CTRL,
129 memset(&cmd, 0, sizeof(struct nvme_passthru_cmd));
/* ZNS CSI identify on the namespace itself. */
131 /* Using nvme_id_ns for data as sizes are same */
132 ret = nvme_identify(fd, data->nsid, NVME_IDENTIFY_CNS_CSI_NS,
/* Both identifies succeeded -> the namespace is host-managed zoned. */
139 *model = ZBD_HOST_MANAGED;
/*
 * Issue a ZNS Zone Management Receive (Report Zones) command starting
 * at @slba, reading @data_len bytes of zone report into @data.
 * @zras_feat carries extra Zone Receive Action Specific flags (e.g.
 * the "extended report only full descriptors" / partial bit).
 * Returns the raw NVME_IOCTL_IO_CMD ioctl result.
 */
145 static int nvme_report_zones(int fd, __u32 nsid, __u64 slba, __u32 zras_feat,
146 __u32 data_len, void *data)
148 struct nvme_passthru_cmd cmd = {
149 .opcode = nvme_zns_cmd_mgmt_recv,
151 .addr = (__u64)(uintptr_t)data,
152 .data_len = data_len,
153 .cdw10 = slba & 0xffffffff,
/* cdw12 is the transfer size as a 0's-based dword count (bytes / 4 - 1). */
155 .cdw12 = (data_len >> 2) - 1,
156 .cdw13 = NVME_ZNS_ZRA_REPORT_ZONES | zras_feat,
157 .timeout_ms = NVME_DEFAULT_IOCTL_TIMEOUT,
160 return ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);
/*
 * fio zbd hook: fill @zbdz with up to @nr_zones zone descriptors
 * starting at byte @offset.  Identifies the namespace to compute the
 * zone size, then fetches the zone report in chunks of at most 1024
 * zones and translates each NVMe zone descriptor into fio's
 * struct zbd_zone (start/wp/capacity in bytes, plus type/condition).
 */
163 int fio_nvme_report_zones(struct thread_data *td, struct fio_file *f,
164 uint64_t offset, struct zbd_zone *zbdz,
165 unsigned int nr_zones)
167 struct nvme_data *data = FILE_ENG_DATA(f);
168 struct nvme_zone_report *zr;
169 struct nvme_zns_id_ns zns_ns;
170 struct nvme_id_ns ns;
171 unsigned int i = 0, j, zones_fetched = 0;
172 unsigned int max_zones, zones_chunks = 1024;
177 /* File is not yet opened */
178 fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
/* Report buffer: header plus one descriptor per zone in a chunk. */
183 zr_len = sizeof(*zr) + (zones_chunks * sizeof(struct nvme_zns_desc));
184 zr = calloc(1, zr_len);
/* Regular identify: needed for flbas to index the ZNS LBA format. */
190 ret = nvme_identify(fd, data->nsid, NVME_IDENTIFY_CNS_NS,
193 log_err("%s: nvme_identify_ns failed, err=%d\n", f->file_name,
/* ZNS identify: provides per-LBA-format zone size (zsze, in LBAs). */
198 ret = nvme_identify(fd, data->nsid, NVME_IDENTIFY_CNS_CSI_NS,
199 NVME_CSI_ZNS, &zns_ns);
201 log_err("%s: nvme_zns_identify_ns failed, err=%d\n",
/* Zone length in bytes = zsze (LBAs) << lba_shift. */
205 zlen = zns_ns.lbafe[ns.flbas & 0x0f].zsze << data->lba_shift;
/* Clamp the request to the zones that actually fit past @offset. */
207 max_zones = (f->real_file_size - offset) / zlen;
208 if (max_zones < nr_zones)
209 nr_zones = max_zones;
211 if (nr_zones < zones_chunks)
212 zones_chunks = nr_zones;
/* Fetch the report chunk by chunk, shrinking the last chunk to fit. */
214 while (zones_fetched < nr_zones) {
215 if (zones_fetched + zones_chunks >= nr_zones) {
216 zones_chunks = nr_zones - zones_fetched;
217 zr_len = sizeof(*zr) + (zones_chunks * sizeof(struct nvme_zns_desc));
219 ret = nvme_report_zones(fd, data->nsid, offset >> data->lba_shift,
220 NVME_ZNS_ZRAS_FEAT_ERZ, zr_len, (void *)zr);
222 log_err("%s: nvme_zns_report_zones failed, err=%d\n",
227 /* Transform the zone-report */
228 for (j = 0; j < zr->nr_zones; j++, i++) {
229 struct nvme_zns_desc *desc = (struct nvme_zns_desc *)&(zr->entries[j]);
/* All descriptor fields are LBA-based; convert to bytes for fio. */
231 zbdz[i].start = desc->zslba << data->lba_shift;
233 zbdz[i].wp = desc->wp << data->lba_shift;
234 zbdz[i].capacity = desc->zcap << data->lba_shift;
236 /* Zone Type is stored in the lower 4 bits of zt. */
237 switch (desc->zt & 0x0f) {
238 case NVME_ZONE_TYPE_SEQWRITE_REQ:
239 zbdz[i].type = ZBD_ZONE_TYPE_SWR;
242 log_err("%s: invalid type for zone at offset %llu.\n",
243 f->file_name, desc->zslba);
248 /* Zone State is stored in the upper 4 bits of zs. */
249 switch (desc->zs >> 4) {
250 case NVME_ZNS_ZS_EMPTY:
251 zbdz[i].cond = ZBD_ZONE_COND_EMPTY;
253 case NVME_ZNS_ZS_IMPL_OPEN:
254 zbdz[i].cond = ZBD_ZONE_COND_IMP_OPEN;
256 case NVME_ZNS_ZS_EXPL_OPEN:
257 zbdz[i].cond = ZBD_ZONE_COND_EXP_OPEN;
259 case NVME_ZNS_ZS_CLOSED:
260 zbdz[i].cond = ZBD_ZONE_COND_CLOSED;
262 case NVME_ZNS_ZS_FULL:
263 zbdz[i].cond = ZBD_ZONE_COND_FULL;
265 case NVME_ZNS_ZS_READ_ONLY:
266 case NVME_ZNS_ZS_OFFLINE:
268 /* Treat all these conditions as offline (don't use!) */
269 zbdz[i].cond = ZBD_ZONE_COND_OFFLINE;
270 zbdz[i].wp = zbdz[i].start;
/* Advance past the zones just reported and move the byte offset along. */
273 zones_fetched += zr->nr_zones;
274 offset += zr->nr_zones * zlen;
/*
 * fio zbd hook: reset the write pointer of every zone covering the
 * byte range [@offset, @offset + @length) by issuing one ZNS Zone
 * Management Send (Zone Reset) command per zone.  Zone boundaries are
 * derived from the job's configured zone_size, not re-queried from
 * the device.
 */
285 int fio_nvme_reset_wp(struct thread_data *td, struct fio_file *f,
286 uint64_t offset, uint64_t length)
288 struct nvme_data *data = FILE_ENG_DATA(f);
289 unsigned int nr_zones;
290 unsigned long long zslba;
293 /* If the file is not yet opened, open it for this function. */
296 fd = open(f->file_name, O_RDWR | O_LARGEFILE);
/* Round up so a partial trailing zone still gets reset. */
301 zslba = offset >> data->lba_shift;
302 nr_zones = (length + td->o.zone_size - 1) / td->o.zone_size;
/* One reset command per zone; zslba advances by a zone's worth of LBAs. */
304 for (i = 0; i < nr_zones; i++, zslba += (td->o.zone_size >> data->lba_shift)) {
305 struct nvme_passthru_cmd cmd = {
306 .opcode = nvme_zns_cmd_mgmt_send,
308 .cdw10 = zslba & 0xffffffff,
309 .cdw11 = zslba >> 32,
310 .cdw13 = NVME_ZNS_ZSA_RESET,
/* No data transfer for a zone reset. */
311 .addr = (__u64)(uintptr_t)NULL,
313 .timeout_ms = NVME_DEFAULT_IOCTL_TIMEOUT,
316 ret = ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);
/*
 * fio zbd hook: report the device's Maximum Open Resources limit
 * through @max_open_zones.  Performs a ZNS namespace identify and
 * converts the 0's-based MOR field to a 1's-based count.
 */
324 int fio_nvme_get_max_open_zones(struct thread_data *td, struct fio_file *f,
325 unsigned int *max_open_zones)
327 struct nvme_data *data = FILE_ENG_DATA(f);
328 struct nvme_zns_id_ns zns_ns;
331 fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
335 ret = nvme_identify(fd, data->nsid, NVME_IDENTIFY_CNS_CSI_NS,
336 NVME_CSI_ZNS, &zns_ns);
338 log_err("%s: nvme_zns_identify_ns failed, err=%d\n",
/* MOR is 0's-based in the ZNS spec, hence the "+ 1". */
343 *max_open_zones = zns_ns.mor + 1;