/*
 * nvme structure declarations and helper functions for the
 * io_uring_cmd engine.
 */
8 int fio_nvme_uring_cmd_prep(struct nvme_uring_cmd *cmd, struct io_u *io_u,
11 struct nvme_data *data = FILE_ENG_DATA(io_u->file);
15 memset(cmd, 0, sizeof(struct nvme_uring_cmd));
17 if (io_u->ddir == DDIR_READ)
18 cmd->opcode = nvme_cmd_read;
19 else if (io_u->ddir == DDIR_WRITE)
20 cmd->opcode = nvme_cmd_write;
24 slba = io_u->offset >> data->lba_shift;
25 nlb = (io_u->xfer_buflen >> data->lba_shift) - 1;
27 /* cdw10 and cdw11 represent starting lba */
28 cmd->cdw10 = slba & 0xffffffff;
29 cmd->cdw11 = slba >> 32;
30 /* cdw12 represent number of lba's for read/write */
31 cmd->cdw12 = nlb | (io_u->dtype << 20);
32 cmd->cdw13 = io_u->dspec << 16;
34 iov->iov_base = io_u->xfer_buf;
35 iov->iov_len = io_u->xfer_buflen;
36 cmd->addr = (__u64)(uintptr_t)iov;
39 cmd->addr = (__u64)(uintptr_t)io_u->xfer_buf;
40 cmd->data_len = io_u->xfer_buflen;
42 cmd->nsid = data->nsid;
46 static int nvme_identify(int fd, __u32 nsid, enum nvme_identify_cns cns,
47 enum nvme_csi csi, void *data)
49 struct nvme_passthru_cmd cmd = {
50 .opcode = nvme_admin_identify,
52 .addr = (__u64)(uintptr_t)data,
53 .data_len = NVME_IDENTIFY_DATA_SIZE,
55 .cdw11 = csi << NVME_IDENTIFY_CSI_SHIFT,
56 .timeout_ms = NVME_DEFAULT_IOCTL_TIMEOUT,
59 return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
62 int fio_nvme_get_info(struct fio_file *f, __u32 *nsid, __u32 *lba_sz,
69 if (f->filetype != FIO_TYPE_CHAR) {
70 log_err("ioengine io_uring_cmd only works with nvme ns "
71 "generic char devices (/dev/ngXnY)\n");
75 fd = open(f->file_name, O_RDONLY);
79 namespace_id = ioctl(fd, NVME_IOCTL_ID);
80 if (namespace_id < 0) {
81 log_err("failed to fetch namespace-id");
87 * Identify namespace to get namespace-id, namespace size in LBA's
90 err = nvme_identify(fd, namespace_id, NVME_IDENTIFY_CNS_NS,
93 log_err("failed to fetch identify namespace\n");
99 *lba_sz = 1 << ns.lbaf[(ns.flbas & 0x0f)].ds;
106 int fio_nvme_get_zoned_model(struct thread_data *td, struct fio_file *f,
107 enum zbd_zoned_model *model)
109 struct nvme_data *data = FILE_ENG_DATA(f);
110 struct nvme_id_ns ns;
111 struct nvme_passthru_cmd cmd;
114 if (f->filetype != FIO_TYPE_CHAR)
117 /* File is not yet opened */
118 fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
122 /* Using nvme_id_ns for data as sizes are same */
123 ret = nvme_identify(fd, data->nsid, NVME_IDENTIFY_CNS_CSI_CTRL,
130 memset(&cmd, 0, sizeof(struct nvme_passthru_cmd));
132 /* Using nvme_id_ns for data as sizes are same */
133 ret = nvme_identify(fd, data->nsid, NVME_IDENTIFY_CNS_CSI_NS,
140 *model = ZBD_HOST_MANAGED;
146 static int nvme_report_zones(int fd, __u32 nsid, __u64 slba, __u32 zras_feat,
147 __u32 data_len, void *data)
149 struct nvme_passthru_cmd cmd = {
150 .opcode = nvme_zns_cmd_mgmt_recv,
152 .addr = (__u64)(uintptr_t)data,
153 .data_len = data_len,
154 .cdw10 = slba & 0xffffffff,
156 .cdw12 = (data_len >> 2) - 1,
157 .cdw13 = NVME_ZNS_ZRA_REPORT_ZONES | zras_feat,
158 .timeout_ms = NVME_DEFAULT_IOCTL_TIMEOUT,
161 return ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);
164 int fio_nvme_report_zones(struct thread_data *td, struct fio_file *f,
165 uint64_t offset, struct zbd_zone *zbdz,
166 unsigned int nr_zones)
168 struct nvme_data *data = FILE_ENG_DATA(f);
169 struct nvme_zone_report *zr;
170 struct nvme_zns_id_ns zns_ns;
171 struct nvme_id_ns ns;
172 unsigned int i = 0, j, zones_fetched = 0;
173 unsigned int max_zones, zones_chunks = 1024;
178 /* File is not yet opened */
179 fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
184 zr_len = sizeof(*zr) + (zones_chunks * sizeof(struct nvme_zns_desc));
185 zr = calloc(1, zr_len);
191 ret = nvme_identify(fd, data->nsid, NVME_IDENTIFY_CNS_NS,
194 log_err("%s: nvme_identify_ns failed, err=%d\n", f->file_name,
199 ret = nvme_identify(fd, data->nsid, NVME_IDENTIFY_CNS_CSI_NS,
200 NVME_CSI_ZNS, &zns_ns);
202 log_err("%s: nvme_zns_identify_ns failed, err=%d\n",
206 zlen = zns_ns.lbafe[ns.flbas & 0x0f].zsze << data->lba_shift;
208 max_zones = (f->real_file_size - offset) / zlen;
209 if (max_zones < nr_zones)
210 nr_zones = max_zones;
212 if (nr_zones < zones_chunks)
213 zones_chunks = nr_zones;
215 while (zones_fetched < nr_zones) {
216 if (zones_fetched + zones_chunks >= nr_zones) {
217 zones_chunks = nr_zones - zones_fetched;
218 zr_len = sizeof(*zr) + (zones_chunks * sizeof(struct nvme_zns_desc));
220 ret = nvme_report_zones(fd, data->nsid, offset >> data->lba_shift,
221 NVME_ZNS_ZRAS_FEAT_ERZ, zr_len, (void *)zr);
223 log_err("%s: nvme_zns_report_zones failed, err=%d\n",
228 /* Transform the zone-report */
229 for (j = 0; j < zr->nr_zones; j++, i++) {
230 struct nvme_zns_desc *desc = (struct nvme_zns_desc *)&(zr->entries[j]);
232 zbdz[i].start = desc->zslba << data->lba_shift;
234 zbdz[i].wp = desc->wp << data->lba_shift;
235 zbdz[i].capacity = desc->zcap << data->lba_shift;
237 /* Zone Type is stored in first 4 bits. */
238 switch (desc->zt & 0x0f) {
239 case NVME_ZONE_TYPE_SEQWRITE_REQ:
240 zbdz[i].type = ZBD_ZONE_TYPE_SWR;
243 log_err("%s: invalid type for zone at offset %llu.\n",
244 f->file_name, desc->zslba);
249 /* Zone State is stored in last 4 bits. */
250 switch (desc->zs >> 4) {
251 case NVME_ZNS_ZS_EMPTY:
252 zbdz[i].cond = ZBD_ZONE_COND_EMPTY;
254 case NVME_ZNS_ZS_IMPL_OPEN:
255 zbdz[i].cond = ZBD_ZONE_COND_IMP_OPEN;
257 case NVME_ZNS_ZS_EXPL_OPEN:
258 zbdz[i].cond = ZBD_ZONE_COND_EXP_OPEN;
260 case NVME_ZNS_ZS_CLOSED:
261 zbdz[i].cond = ZBD_ZONE_COND_CLOSED;
263 case NVME_ZNS_ZS_FULL:
264 zbdz[i].cond = ZBD_ZONE_COND_FULL;
266 case NVME_ZNS_ZS_READ_ONLY:
267 case NVME_ZNS_ZS_OFFLINE:
269 /* Treat all these conditions as offline (don't use!) */
270 zbdz[i].cond = ZBD_ZONE_COND_OFFLINE;
271 zbdz[i].wp = zbdz[i].start;
274 zones_fetched += zr->nr_zones;
275 offset += zr->nr_zones * zlen;
286 int fio_nvme_reset_wp(struct thread_data *td, struct fio_file *f,
287 uint64_t offset, uint64_t length)
289 struct nvme_data *data = FILE_ENG_DATA(f);
290 unsigned int nr_zones;
291 unsigned long long zslba;
294 /* If the file is not yet opened, open it for this function. */
297 fd = open(f->file_name, O_RDWR | O_LARGEFILE);
302 zslba = offset >> data->lba_shift;
303 nr_zones = (length + td->o.zone_size - 1) / td->o.zone_size;
305 for (i = 0; i < nr_zones; i++, zslba += (td->o.zone_size >> data->lba_shift)) {
306 struct nvme_passthru_cmd cmd = {
307 .opcode = nvme_zns_cmd_mgmt_send,
309 .cdw10 = zslba & 0xffffffff,
310 .cdw11 = zslba >> 32,
311 .cdw13 = NVME_ZNS_ZSA_RESET,
312 .addr = (__u64)(uintptr_t)NULL,
314 .timeout_ms = NVME_DEFAULT_IOCTL_TIMEOUT,
317 ret = ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);
325 int fio_nvme_get_max_open_zones(struct thread_data *td, struct fio_file *f,
326 unsigned int *max_open_zones)
328 struct nvme_data *data = FILE_ENG_DATA(f);
329 struct nvme_zns_id_ns zns_ns;
332 fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
336 ret = nvme_identify(fd, data->nsid, NVME_IDENTIFY_CNS_CSI_NS,
337 NVME_CSI_ZNS, &zns_ns);
339 log_err("%s: nvme_zns_identify_ns failed, err=%d\n",
344 *max_open_zones = zns_ns.mor + 1;
350 static inline int nvme_fdp_reclaim_unit_handle_status(int fd, __u32 nsid,
351 __u32 data_len, void *data)
353 struct nvme_passthru_cmd cmd = {
354 .opcode = nvme_cmd_io_mgmt_recv,
356 .addr = (__u64)(uintptr_t)data,
357 .data_len = data_len,
359 .cdw11 = (data_len >> 2) - 1,
362 return ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);
365 int fio_nvme_iomgmt_ruhs(struct thread_data *td, struct fio_file *f,
366 struct nvme_fdp_ruh_status *ruhs, __u32 bytes)
368 struct nvme_data *data = FILE_ENG_DATA(f);
371 fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
375 ret = nvme_fdp_reclaim_unit_handle_status(fd, data->nsid, bytes, ruhs);
377 log_err("%s: nvme_fdp_reclaim_unit_handle_status failed, err=%d\n",