From 673159e61b745adbee5135b710e59d7cef54e9d3 Mon Sep 17 00:00:00 2001
From: celestinechen <166680286+celestinechen@users.noreply.github.com>
Date: Fri, 19 Apr 2024 06:22:15 +0000
Subject: [PATCH 01/16] FIO with fsync option issues more DDIR_SYNC commands
 than expected

Issue and root cause: when the fsync option is used, the number of
flush (or DDIR_SYNC) commands issued is greater than the expected
number of flush commands.

To elaborate, consider fsync=1 in the fio config file:
1. FIO issues 1 write command
2. After the write completes, FIO sets the last_was_sync variable to
   false
3. FIO issues 1 flush command
4. FIO keeps issuing flush commands since last_was_sync is still false,
   which causes more flush commands to be issued than expected
5. last_was_sync is set to true only after the flush command completes
The above steps repeat until the workload is completed.

Fix: instead of setting last_was_sync to true after a flush command
completes and setting last_was_sync to false after a write command
completes, set last_was_sync to true after a flush command is issued
and set last_was_sync to false after a write command is issued.

Signed-off-by: Celestine Chen <celestinechen@google.com>
---
 io_u.c      | 2 --
 ioengines.c | 4 ++++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/io_u.c b/io_u.c
index 83895893..a499ff07 100644
--- a/io_u.c
+++ b/io_u.c
@@ -2113,7 +2113,6 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 	if (ddir_sync(ddir)) {
 		if (io_u->error)
 			goto error;
-		td->last_was_sync = true;
 		if (f) {
 			f->first_write = -1ULL;
 			f->last_write = -1ULL;
@@ -2123,7 +2122,6 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
 		return;
 	}

-	td->last_was_sync = false;
 	td->last_ddir = ddir;

 	if (!io_u->error && ddir_rw(ddir)) {
diff --git a/ioengines.c b/ioengines.c
index 87cc2286..6b81dc77 100644
--- a/ioengines.c
+++ b/ioengines.c
@@ -436,6 +436,8 @@ enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
 			io_u_mark_depth(td, 1);
 			td->ts.total_io_u[io_u->ddir]++;
 		}
+
+		td->last_was_sync = ddir_sync(io_u->ddir);
 	} else if (ret == FIO_Q_QUEUED) {
 		td->io_u_queued++;

@@ -445,6 +447,8 @@ enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
 		if (td->io_u_queued >= td->o.iodepth_batch)
 			td_io_commit(td);
+
+		td->last_was_sync = ddir_sync(io_u->ddir);
 	}

 	if (!td_ioengine_flagged(td, FIO_SYNCIO) &&
-- 
2.25.1

From c60d54ae79334a88561bbe66a0b422e2d2fa093c Mon Sep 17 00:00:00 2001
From: Vincent Fu
Date: Thu, 18 Apr 2024 17:55:27 +0000
Subject: [PATCH 02/16] fio: rename fdp.[c,h] to dataplacement.[c,h]

We can use the code in these files to support NVMe streams. Streams
also falls under the umbrella of data placement, so it seems reasonable
to put the streams and FDP code in the same source files.

Also change the prefix of some functions from fdp_ to dp_ to indicate
that they are not specific to FDP but apply more generally to the two
data placement features.

No functional change.
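For reference, the renames in this patch are:

    fdp.c -> dataplacement.c
    fdp.h -> dataplacement.h
    fdp_init()            -> dp_init()
    fdp_fill_dspec_data() -> dp_fill_dspec_data()

fdp_free_ruhs_info() keeps its name.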
Signed-off-by: Vincent Fu --- Makefile | 2 +- fdp.c => dataplacement.c | 6 +++--- fdp.h => dataplacement.h | 10 +++++----- engines/xnvme.c | 2 +- filesetup.c | 2 +- io_u.c | 2 +- ioengines.h | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) rename fdp.c => dataplacement.c (95%) rename fdp.h => dataplacement.h (65%) diff --git a/Makefile b/Makefile index cc8164b2..be57e296 100644 --- a/Makefile +++ b/Makefile @@ -62,7 +62,7 @@ SOURCE := $(sort $(patsubst $(SRCDIR)/%,%,$(wildcard $(SRCDIR)/crc/*.c)) \ gettime-thread.c helpers.c json.c idletime.c td_error.c \ profiles/tiobench.c profiles/act.c io_u_queue.c filelock.c \ workqueue.c rate-submit.c optgroup.c helper_thread.c \ - steadystate.c zone-dist.c zbd.c dedupe.c fdp.c + steadystate.c zone-dist.c zbd.c dedupe.c dataplacement.c ifdef CONFIG_LIBHDFS HDFSFLAGS= -I $(JAVA_HOME)/include -I $(JAVA_HOME)/include/linux -I $(FIO_LIBHDFS_INCLUDE) diff --git a/fdp.c b/dataplacement.c similarity index 95% rename from fdp.c rename to dataplacement.c index 49c80d2c..7518d193 100644 --- a/fdp.c +++ b/dataplacement.c @@ -13,7 +13,7 @@ #include "file.h" #include "pshared.h" -#include "fdp.h" +#include "dataplacement.h" static int fdp_ruh_info(struct thread_data *td, struct fio_file *f, struct fio_ruhs_info *ruhs) @@ -86,7 +86,7 @@ out: return ret; } -int fdp_init(struct thread_data *td) +int dp_init(struct thread_data *td) { struct fio_file *f; int i, ret = 0; @@ -107,7 +107,7 @@ void fdp_free_ruhs_info(struct fio_file *f) f->ruhs_info = NULL; } -void fdp_fill_dspec_data(struct thread_data *td, struct io_u *io_u) +void dp_fill_dspec_data(struct thread_data *td, struct io_u *io_u) { struct fio_file *f = io_u->file; struct fio_ruhs_info *ruhs = f->ruhs_info; diff --git a/fdp.h b/dataplacement.h similarity index 65% rename from fdp.h rename to dataplacement.h index accbac38..72bd4c08 100644 --- a/fdp.h +++ b/dataplacement.h @@ -1,5 +1,5 @@ -#ifndef FIO_FDP_H -#define FIO_FDP_H +#ifndef FIO_DATAPLACEMENT_H +#define FIO_DATAPLACEMENT_H #include "io_u.h" @@ -22,8 +22,8 @@ struct fio_ruhs_info { uint16_t plis[]; }; -int fdp_init(struct thread_data *td); +int dp_init(struct thread_data *td); void fdp_free_ruhs_info(struct fio_file *f); -void fdp_fill_dspec_data(struct thread_data *td, struct io_u *io_u); +void dp_fill_dspec_data(struct thread_data *td, struct io_u *io_u); -#endif /* FIO_FDP_H */ +#endif /* FIO_DATAPLACEMENT_H */ diff --git a/engines/xnvme.c b/engines/xnvme.c index a8137286..6ba4aa46 100644 --- a/engines/xnvme.c +++ b/engines/xnvme.c @@ -13,7 +13,7 @@ #include "fio.h" #include "verify.h" #include "zbd_types.h" -#include "fdp.h" +#include "dataplacement.h" #include "optgroup.h" static pthread_mutex_t g_serialize = PTHREAD_MUTEX_INITIALIZER; diff --git a/filesetup.c b/filesetup.c index 2d277a64..8923f2b3 100644 --- a/filesetup.c +++ b/filesetup.c @@ -1412,7 +1412,7 @@ done: td_restore_runstate(td, old_state); if (td->o.fdp) { - err = fdp_init(td); + err = dp_init(td); if (err) goto err_out; } diff --git a/io_u.c b/io_u.c index a499ff07..89f3d789 100644 --- a/io_u.c +++ b/io_u.c @@ -1066,7 +1066,7 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) } if (td->o.fdp) - fdp_fill_dspec_data(td, io_u); + dp_fill_dspec_data(td, io_u); if (io_u->offset + io_u->buflen > io_u->file->real_file_size) { dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n", diff --git a/ioengines.h b/ioengines.h index 4fe9bb98..d5b0cafe 100644 --- a/ioengines.h +++ b/ioengines.h @@ -7,7 +7,7 @@ #include "flist.h" #include "io_u.h" 
#include "zbd_types.h" -#include "fdp.h" +#include "dataplacement.h" #define FIO_IOOPS_VERSION 34 -- 2.25.1 From 65ca67915d1fab0df9bfafa82a644841516440a4 Mon Sep 17 00:00:00 2001 From: Vincent Fu Date: Wed, 3 Jan 2024 20:16:19 +0000 Subject: [PATCH 03/16] fio: create over-arching data placement option Since FDP and streams are similar, we should have an over-arching data placement option that encompasses both of these frameworks instead of having separate sets of similar options for FDP and streams. With a common set of options, users will be able to select the data placement strategy (fdp or streams), the placement identifiers to use, and the algorithm for selecting from the list of placement identifiers. The original set of FDP options is retained for backward compatibility. No functional change. Signed-off-by: Vincent Fu --- cconv.c | 18 +++++++++------- dataplacement.c | 14 ++++++------ dataplacement.h | 11 ++++++++-- filesetup.c | 2 +- init.c | 10 ++++++++- io_u.c | 2 +- options.c | 55 ++++++++++++++++++++++++++++++++++++------------ server.h | 2 +- thread_options.h | 15 +++++++------ 9 files changed, 87 insertions(+), 42 deletions(-) diff --git a/cconv.c b/cconv.c index ead47248..16112248 100644 --- a/cconv.c +++ b/cconv.c @@ -354,10 +354,11 @@ int convert_thread_options_to_cpu(struct thread_options *o, o->merge_blktrace_iters[i].u.f = fio_uint64_to_double(le64_to_cpu(top->merge_blktrace_iters[i].u.i)); o->fdp = le32_to_cpu(top->fdp); - o->fdp_pli_select = le32_to_cpu(top->fdp_pli_select); - o->fdp_nrpli = le32_to_cpu(top->fdp_nrpli); - for (i = 0; i < o->fdp_nrpli; i++) - o->fdp_plis[i] = le32_to_cpu(top->fdp_plis[i]); + o->dp_type = le32_to_cpu(top->dp_type); + o->dp_id_select = le32_to_cpu(top->dp_id_select); + o->dp_nr_ids = le32_to_cpu(top->dp_nr_ids); + for (i = 0; i < o->dp_nr_ids; i++) + o->dp_ids[i] = le32_to_cpu(top->dp_ids[i]); #if 0 uint8_t cpumask[FIO_TOP_STR_MAX]; uint8_t verify_cpumask[FIO_TOP_STR_MAX]; @@ -652,10 +653,11 @@ void convert_thread_options_to_net(struct thread_options_pack *top, top->merge_blktrace_iters[i].u.i = __cpu_to_le64(fio_double_to_uint64(o->merge_blktrace_iters[i].u.f)); top->fdp = cpu_to_le32(o->fdp); - top->fdp_pli_select = cpu_to_le32(o->fdp_pli_select); - top->fdp_nrpli = cpu_to_le32(o->fdp_nrpli); - for (i = 0; i < o->fdp_nrpli; i++) - top->fdp_plis[i] = cpu_to_le32(o->fdp_plis[i]); + top->dp_type = cpu_to_le32(o->dp_type); + top->dp_id_select = cpu_to_le32(o->dp_id_select); + top->dp_nr_ids = cpu_to_le32(o->dp_nr_ids); + for (i = 0; i < o->dp_nr_ids; i++) + top->dp_ids[i] = cpu_to_le32(o->dp_ids[i]); #if 0 uint8_t cpumask[FIO_TOP_STR_MAX]; uint8_t verify_cpumask[FIO_TOP_STR_MAX]; diff --git a/dataplacement.c b/dataplacement.c index 7518d193..a7170863 100644 --- a/dataplacement.c +++ b/dataplacement.c @@ -59,13 +59,13 @@ static int init_ruh_info(struct thread_data *td, struct fio_file *f) if (ruhs->nr_ruhs > FDP_MAX_RUHS) ruhs->nr_ruhs = FDP_MAX_RUHS; - if (td->o.fdp_nrpli == 0) { + if (td->o.dp_nr_ids == 0) { f->ruhs_info = ruhs; return 0; } - for (i = 0; i < td->o.fdp_nrpli; i++) { - if (td->o.fdp_plis[i] >= ruhs->nr_ruhs) { + for (i = 0; i < td->o.dp_nr_ids; i++) { + if (td->o.dp_ids[i] >= ruhs->nr_ruhs) { ret = -EINVAL; goto out; } @@ -77,9 +77,9 @@ static int init_ruh_info(struct thread_data *td, struct fio_file *f) goto out; } - tmp->nr_ruhs = td->o.fdp_nrpli; - for (i = 0; i < td->o.fdp_nrpli; i++) - tmp->plis[i] = ruhs->plis[td->o.fdp_plis[i]]; + tmp->nr_ruhs = td->o.dp_nr_ids; + for (i = 0; i < td->o.dp_nr_ids; i++) + tmp->plis[i] = 
ruhs->plis[td->o.dp_ids[i]]; f->ruhs_info = tmp; out: sfree(ruhs); @@ -119,7 +119,7 @@ void dp_fill_dspec_data(struct thread_data *td, struct io_u *io_u) return; } - if (td->o.fdp_pli_select == FIO_FDP_RR) { + if (td->o.dp_id_select == FIO_DP_RR) { if (ruhs->pli_loc >= ruhs->nr_ruhs) ruhs->pli_loc = 0; diff --git a/dataplacement.h b/dataplacement.h index 72bd4c08..b6ceb5bc 100644 --- a/dataplacement.h +++ b/dataplacement.h @@ -5,15 +5,22 @@ #define FDP_DIR_DTYPE 2 #define FDP_MAX_RUHS 128 +#define FIO_MAX_DP_IDS 16 /* * How fio chooses what placement identifier to use next. Choice of * uniformly random, or roundrobin. */ +enum { + FIO_DP_RANDOM = 0x1, + FIO_DP_RR = 0x2, +}; + enum { - FIO_FDP_RANDOM = 0x1, - FIO_FDP_RR = 0x2, + FIO_DP_NONE = 0x0, + FIO_DP_FDP = 0x1, + FIO_DP_STREAMS = 0x2, }; struct fio_ruhs_info { diff --git a/filesetup.c b/filesetup.c index 8923f2b3..6fbfced5 100644 --- a/filesetup.c +++ b/filesetup.c @@ -1411,7 +1411,7 @@ done: td_restore_runstate(td, old_state); - if (td->o.fdp) { + if (td->o.dp_type == FIO_DP_FDP) { err = dp_init(td); if (err) goto err_out; diff --git a/init.c b/init.c index 7a0b14a3..ff3e9a90 100644 --- a/init.c +++ b/init.c @@ -1015,7 +1015,15 @@ static int fixup_options(struct thread_data *td) ret |= 1; } - + if (td->o.fdp) { + if (fio_option_is_set(&td->o, dp_type) && + (td->o.dp_type == FIO_DP_STREAMS || td->o.dp_type == FIO_DP_NONE)) { + log_err("fio: fdp=1 is not compatible with dataplacement={streams, none}\n"); + ret |= 1; + } else { + td->o.dp_type = FIO_DP_FDP; + } + } return ret; } diff --git a/io_u.c b/io_u.c index 89f3d789..86ad7424 100644 --- a/io_u.c +++ b/io_u.c @@ -1065,7 +1065,7 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) } } - if (td->o.fdp) + if (td->o.dp_type == FIO_DP_FDP) dp_fill_dspec_data(td, io_u); if (io_u->offset + io_u->buflen > io_u->file->real_file_size) { diff --git a/options.c b/options.c index de935efc..7e589299 100644 --- a/options.c +++ b/options.c @@ -270,12 +270,12 @@ static int str_fdp_pli_cb(void *data, const char *input) strip_blank_front(&str); strip_blank_end(str); - while ((v = strsep(&str, ",")) != NULL && i < FIO_MAX_PLIS) - td->o.fdp_plis[i++] = strtoll(v, NULL, 0); + while ((v = strsep(&str, ",")) != NULL && i < FIO_MAX_DP_IDS) + td->o.dp_ids[i++] = strtoll(v, NULL, 0); free(p); - qsort(td->o.fdp_plis, i, sizeof(*td->o.fdp_plis), fio_fdp_cmp); - td->o.fdp_nrpli = i; + qsort(td->o.dp_ids, i, sizeof(*td->o.dp_ids), fio_fdp_cmp); + td->o.dp_nr_ids = i; return 0; } @@ -3710,32 +3710,59 @@ struct fio_option fio_options[FIO_MAX_OPTS] = { .group = FIO_OPT_G_INVALID, }, { - .name = "fdp_pli_select", - .lname = "FDP Placement ID select", + .name = "dataplacement", + .alias = "data_placement", + .lname = "Data Placement interface", .type = FIO_OPT_STR, - .off1 = offsetof(struct thread_options, fdp_pli_select), - .help = "Select which FDP placement ID to use next", + .off1 = offsetof(struct thread_options, dp_type), + .help = "Data Placement interface to use", + .def = "none", + .category = FIO_OPT_C_IO, + .group = FIO_OPT_G_INVALID, + .posval = { + { .ival = "none", + .oval = FIO_DP_NONE, + .help = "Do not specify a data placement interface", + }, + { .ival = "fdp", + .oval = FIO_DP_FDP, + .help = "Use Flexible Data Placement interface", + }, + { .ival = "streams", + .oval = FIO_DP_STREAMS, + .help = "Use Streams interface", + }, + }, + }, + { + .name = "plid_select", + .alias = "fdp_pli_select", + .lname = "Data Placement ID selection strategy", + .type = FIO_OPT_STR, + .off1 = 
offsetof(struct thread_options, dp_id_select),
+	.help	= "Strategy for selecting next Data Placement ID",
 	.def	= "roundrobin",
 	.category = FIO_OPT_C_IO,
 	.group	= FIO_OPT_G_INVALID,
 	.posval	= {
 		  { .ival = "random",
-		    .oval = FIO_FDP_RANDOM,
+		    .oval = FIO_DP_RANDOM,
 		    .help = "Choose a Placement ID at random (uniform)",
 		  },
 		  { .ival = "roundrobin",
-		    .oval = FIO_FDP_RR,
+		    .oval = FIO_DP_RR,
 		    .help = "Round robin select Placement IDs",
 		  },
 	},
 },
 {
-	.name	= "fdp_pli",
-	.lname	= "FDP Placement ID indicies",
+	.name	= "plids",
+	.alias	= "fdp_pli",
+	.lname	= "Stream IDs/Data Placement ID indices",
 	.type	= FIO_OPT_STR,
 	.cb	= str_fdp_pli_cb,
-	.off1	= offsetof(struct thread_options, fdp_plis),
-	.help	= "Sets which placement ids to use (defaults to all)",
+	.off1	= offsetof(struct thread_options, dp_ids),
+	.help	= "Sets which Data Placement ids to use (defaults to all for FDP)",
 	.hide	= 1,
 	.category = FIO_OPT_C_IO,
 	.group	= FIO_OPT_G_INVALID,
diff --git a/server.h b/server.h
index 6d2659b0..83ce449b 100644
--- a/server.h
+++ b/server.h
@@ -51,7 +51,7 @@ struct fio_net_cmd_reply {
 };

 enum {
-	FIO_SERVER_VER			= 103,
+	FIO_SERVER_VER			= 104,

 	FIO_SERVER_MAX_FRAGMENT_PDU	= 1024,
 	FIO_SERVER_MAX_CMD_MB		= 2048,
diff --git a/thread_options.h b/thread_options.h
index c2e71518..a36b7909 100644
--- a/thread_options.h
+++ b/thread_options.h
@@ -391,11 +391,11 @@ struct thread_options {
 	fio_fp64_t zrt;
 	fio_fp64_t zrf;

-#define FIO_MAX_PLIS 16
 	unsigned int fdp;
-	unsigned int fdp_pli_select;
-	unsigned int fdp_plis[FIO_MAX_PLIS];
-	unsigned int fdp_nrpli;
+	unsigned int dp_type;
+	unsigned int dp_id_select;
+	unsigned int dp_ids[FIO_MAX_DP_IDS];
+	unsigned int dp_nr_ids;

 	unsigned int log_entries;
 	unsigned int log_prio;
@@ -709,9 +709,10 @@ struct thread_options_pack {
 	uint32_t log_prio;

 	uint32_t fdp;
-	uint32_t fdp_pli_select;
-	uint32_t fdp_plis[FIO_MAX_PLIS];
-	uint32_t fdp_nrpli;
+	uint32_t dp_type;
+	uint32_t dp_id_select;
+	uint32_t dp_ids[FIO_MAX_DP_IDS];
+	uint32_t dp_nr_ids;

 	uint32_t num_range;
 	/*
-- 
2.25.1

From ece3a998831dff9f111eb8432dd85ee476fc5a56 Mon Sep 17 00:00:00 2001
From: Vincent Fu
Date: Tue, 9 Jan 2024 04:14:19 +0000
Subject: [PATCH 04/16] t/nvmept_fdp.py: test script for FDP

This test script uses the io_uring pass-through ioengine to test fio's
FDP support. It uses both the original and the new fdp-related options.

Signed-off-by: Vincent Fu
---
 t/nvmept_fdp.py | 745 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 745 insertions(+)
 create mode 100755 t/nvmept_fdp.py

diff --git a/t/nvmept_fdp.py b/t/nvmept_fdp.py
new file mode 100755
index 00000000..031b439c
--- /dev/null
+++ b/t/nvmept_fdp.py
@@ -0,0 +1,745 @@
+#!/usr/bin/env python3
+#
+# Copyright 2024 Samsung Electronics Co., Ltd All Rights Reserved
+#
+# For conditions of distribution and use, see the accompanying COPYING file.
+#
+"""
+# nvmept_fdp.py
+#
+# Test fio's io_uring_cmd ioengine with NVMe pass-through FDP write commands.
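+#
+# Note: these tests use the nvme-cli commands "nvme fdp status" and
+# "nvme fdp update" to check and reset reclaim unit handle state, so
+# nvme-cli must be installed.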
+# +# USAGE +# see python3 nvmept_fdp.py --help +# +# EXAMPLES +# python3 t/nvmept_fdp.py --dut /dev/ng0n1 +# python3 t/nvmept_fdp.py --dut /dev/ng1n1 -f ./fio +# +# REQUIREMENTS +# Python 3.6 +# Device formatted with LBA data size 4096 bytes +# Device with at least five placement IDs +# +# WARNING +# This is a destructive test +""" +import os +import sys +import json +import time +import locale +import logging +import argparse +import subprocess +from pathlib import Path +from fiotestlib import FioJobCmdTest, run_fio_tests +from fiotestcommon import SUCCESS_NONZERO + + +class FDPTest(FioJobCmdTest): + """ + NVMe pass-through test class. Check to make sure output for selected data + direction(s) is non-zero and that zero data appears for other directions. + """ + + def setup(self, parameters): + """Setup a test.""" + + fio_args = [ + "--name=nvmept-fdp", + "--ioengine=io_uring_cmd", + "--cmd_type=nvme", + "--randrepeat=0", + f"--filename={self.fio_opts['filename']}", + f"--rw={self.fio_opts['rw']}", + f"--output={self.filenames['output']}", + f"--output-format={self.fio_opts['output-format']}", + ] + for opt in ['fixedbufs', 'nonvectored', 'force_async', 'registerfiles', + 'sqthread_poll', 'sqthread_poll_cpu', 'hipri', 'nowait', + 'time_based', 'runtime', 'verify', 'io_size', 'num_range', + 'iodepth', 'iodepth_batch', 'iodepth_batch_complete', + 'size', 'rate', 'bs', 'bssplit', 'bsrange', 'randrepeat', + 'buffer_pattern', 'verify_pattern', 'offset', 'fdp', + 'fdp_pli', 'fdp_pli_select', 'dataplacement', 'plid_select', + 'plids', 'number_ios']: + if opt in self.fio_opts: + option = f"--{opt}={self.fio_opts[opt]}" + fio_args.append(option) + + super().setup(fio_args) + + + def check_result(self): + try: + self._check_result() + finally: + if not update_all_ruhs(self.fio_opts['filename']): + logging.error("Could not reset device") + if not check_all_ruhs(self.fio_opts['filename']): + logging.error("Reclaim units have inconsistent RUAMW values") + + + def _check_result(self): + + super().check_result() + + if 'rw' not in self.fio_opts or \ + not self.passed or \ + 'json' not in self.fio_opts['output-format']: + return + + job = self.json_data['jobs'][0] + + if self.fio_opts['rw'] in ['read', 'randread']: + self.passed = self.check_all_ddirs(['read'], job) + elif self.fio_opts['rw'] in ['write', 'randwrite']: + if 'verify' not in self.fio_opts: + self.passed = self.check_all_ddirs(['write'], job) + else: + self.passed = self.check_all_ddirs(['read', 'write'], job) + elif self.fio_opts['rw'] in ['trim', 'randtrim']: + self.passed = self.check_all_ddirs(['trim'], job) + elif self.fio_opts['rw'] in ['readwrite', 'randrw']: + self.passed = self.check_all_ddirs(['read', 'write'], job) + elif self.fio_opts['rw'] in ['trimwrite', 'randtrimwrite']: + self.passed = self.check_all_ddirs(['trim', 'write'], job) + else: + logging.error("Unhandled rw value %s", self.fio_opts['rw']) + self.passed = False + + if 'iodepth' in self.fio_opts: + # We will need to figure something out if any test uses an iodepth + # different from 8 + if job['iodepth_level']['8'] < 95: + logging.error("Did not achieve requested iodepth") + self.passed = False + else: + logging.debug("iodepth 8 target met %s", job['iodepth_level']['8']) + + +class FDPMultiplePLIDTest(FDPTest): + """ + Write to multiple placement IDs. 
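+    Check that RUAMW accounting matches the PLID selection strategy
+    (round robin or random).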
+ """ + + def setup(self, parameters): + mapping = { + 'nruhsd': FIO_FDP_NUMBER_PLIDS, + 'max_ruamw': FIO_FDP_MAX_RUAMW, + } + if 'number_ios' in self.fio_opts and isinstance(self.fio_opts['number_ios'], str): + self.fio_opts['number_ios'] = eval(self.fio_opts['number_ios'].format(**mapping)) + + super().setup(parameters) + + def _check_result(self): + if 'fdp_pli' in self.fio_opts: + plid_list = self.fio_opts['fdp_pli'].split(',') + elif 'plids' in self.fio_opts: + plid_list = self.fio_opts['plids'].split(',') + else: + plid_list = list(range(FIO_FDP_NUMBER_PLIDS)) + + plid_list = sorted([int(i) for i in plid_list]) + logging.debug("plid_list: %s", str(plid_list)) + + fdp_status = get_fdp_status(self.fio_opts['filename']) + + select = "roundrobin" + if 'fdp_pli_select' in self.fio_opts: + select = self.fio_opts['fdp_pli_select'] + elif 'plid_select' in self.fio_opts: + select = self.fio_opts['plid_select'] + + if select == "roundrobin": + self._check_robin(plid_list, fdp_status) + elif select == "random": + self._check_random(plid_list, fdp_status) + else: + logging.error("Unknown plid selection strategy %s", select) + self.passed = False + + super()._check_result() + + def _check_robin(self, plid_list, fdp_status): + """ + With round robin we can know exactly how many writes each PLID will + receive. + """ + ruamw = [FIO_FDP_MAX_RUAMW] * FIO_FDP_NUMBER_PLIDS + + remainder = int(self.fio_opts['number_ios'] % len(plid_list)) + whole = int((self.fio_opts['number_ios'] - remainder) / len(plid_list)) + logging.debug("PLIDs in the list should receive %d writes; %d PLIDs will receive one extra", + whole, remainder) + + for plid in plid_list: + ruamw[plid] -= whole + if remainder: + ruamw[plid] -= 1 + remainder -= 1 + logging.debug("Expected ruamw values: %s", str(ruamw)) + + for idx, ruhs in enumerate(fdp_status['ruhss']): + if ruhs['ruamw'] != ruamw[idx]: + logging.error("RUAMW mismatch with idx %d, pid %d, expected %d, observed %d", idx, + ruhs['pid'], ruamw[idx], ruhs['ruamw']) + self.passed = False + break + + logging.debug("RUAMW match with idx %d, pid %d: ruamw=%d", idx, ruhs['pid'], ruamw[idx]) + + def _check_random(self, plid_list, fdp_status): + """ + With random selection, a set of PLIDs will receive all the write + operations and the remainder will be untouched. + """ + + total_ruamw = 0 + for plid in plid_list: + total_ruamw += fdp_status['ruhss'][plid]['ruamw'] + + expected = len(plid_list) * FIO_FDP_MAX_RUAMW - self.fio_opts['number_ios'] + if total_ruamw != expected: + logging.error("Expected total ruamw %d for plids %s, observed %d", expected, + str(plid_list), total_ruamw) + self.passed = False + else: + logging.debug("Observed expected total ruamw %d for plids %s", expected, str(plid_list)) + + for idx, ruhs in enumerate(fdp_status['ruhss']): + if idx in plid_list: + continue + if ruhs['ruamw'] != FIO_FDP_MAX_RUAMW: + logging.error("Unexpected ruamw %d for idx %d, pid %d, expected %d", ruhs['ruamw'], + idx, ruhs['pid'], FIO_FDP_MAX_RUAMW) + self.passed = False + else: + logging.debug("Observed expected ruamw %d for idx %d, pid %d", ruhs['ruamw'], idx, + ruhs['pid']) + + +class FDPSinglePLIDTest(FDPTest): + """ + Write to a single placement ID only. 
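+    Check that RUAMW for the target placement ID drops by exactly the
+    number of LBAs written.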
+ """ + + def _check_result(self): + if 'plids' in self.fio_opts: + plid = self.fio_opts['plids'] + elif 'fdp_pli' in self.fio_opts: + plid = self.fio_opts['fdp_pli'] + else: + plid = 0 + + fdp_status = get_fdp_status(self.fio_opts['filename']) + ruamw = fdp_status['ruhss'][plid]['ruamw'] + lba_count = self.fio_opts['number_ios'] + + if FIO_FDP_MAX_RUAMW - lba_count != ruamw: + logging.error("FDP accounting mismatch for plid %d; expected ruamw %d, observed %d", + plid, FIO_FDP_MAX_RUAMW - lba_count, ruamw) + self.passed = False + else: + logging.debug("FDP accounting as expected for plid %d; ruamw = %d", plid, ruamw) + + super()._check_result() + + +class FDPReadTest(FDPTest): + """ + Read workload test. + """ + + def _check_result(self): + ruamw = check_all_ruhs(self.fio_opts['filename']) + + if ruamw != FIO_FDP_MAX_RUAMW: + logging.error("Read workload affected FDP ruamw") + self.passed = False + else: + logging.debug("Read workload did not disturb FDP ruamw") + super()._check_result() + + +def get_fdp_status(dut): + """ + Run the nvme-cli command to obtain FDP status and return result as a JSON + object. + """ + + cmd = f"sudo nvme fdp status --output-format=json {dut}" + cmd = cmd.split(' ') + cmd_result = subprocess.run(cmd, capture_output=True, check=False, + encoding=locale.getpreferredencoding()) + + if cmd_result.returncode != 0: + logging.error("Error obtaining device %s FDP status: %s", dut, cmd_result.stderr) + return False + + return json.loads(cmd_result.stdout) + + +def update_ruh(dut, plid): + """ + Update reclaim unit handles with specified ID(s). This tells the device to + point the RUH to a new (empty) reclaim unit. + """ + + ids = ','.join(plid) if isinstance(plid, list) else plid + cmd = f"nvme fdp update --pids={ids} {dut}" + cmd = cmd.split(' ') + cmd_result = subprocess.run(cmd, capture_output=True, check=False, + encoding=locale.getpreferredencoding()) + + if cmd_result.returncode != 0: + logging.error("Error updating RUH %s ID(s) %s", dut, ids) + return False + + return True + + +def update_all_ruhs(dut): + """ + Update all reclaim unit handles on the device. + """ + + fdp_status = get_fdp_status(dut) + for ruhs in fdp_status['ruhss']: + if not update_ruh(dut, ruhs['pid']): + return False + + return True + + +def check_all_ruhs(dut): + """ + Check that all RUHs have the same value for reclaim unit available media + writes (RUAMW). Return the RUAMW value. 
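+    Return False if the RUAMW values do not all match.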
+ """ + + fdp_status = get_fdp_status(dut) + ruh_status = fdp_status['ruhss'] + + ruamw = ruh_status[0]['ruamw'] + for ruhs in ruh_status: + if ruhs['ruamw'] != ruamw: + logging.error("RUAMW mismatch: found %d, expected %d", ruhs['ruamw'], ruamw) + return False + + return ruamw + + +TEST_LIST = [ + # Write one LBA to one PLID using both the old and new sets of options + ## omit fdp_pli_select/plid_select + { + "test_id": 1, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "number_ios": 1, + "verify": "crc32c", + "fdp": 1, + "fdp_pli": 3, + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + { + "test_id": 2, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": 1, + "verify": "crc32c", + "dataplacement": "fdp", + "plids": 3, + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + ## fdp_pli_select/plid_select=roundrobin + { + "test_id": 3, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "number_ios": 1, + "verify": "crc32c", + "fdp": 1, + "fdp_pli": 3, + "fdp_pli_select": "roundrobin", + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + { + "test_id": 4, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": 1, + "verify": "crc32c", + "dataplacement": "fdp", + "plids": 3, + "plid_select": "roundrobin", + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + ## fdp_pli_select/plid_select=random + { + "test_id": 5, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "number_ios": 1, + "verify": "crc32c", + "fdp": 1, + "fdp_pli": 3, + "fdp_pli_select": "random", + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + { + "test_id": 6, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": 1, + "verify": "crc32c", + "dataplacement": "fdp", + "plids": 3, + "plid_select": "random", + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + # Write four LBAs to one PLID using both the old and new sets of options + ## omit fdp_pli_select/plid_select + { + "test_id": 7, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "number_ios": 4, + "verify": "crc32c", + "fdp": 1, + "fdp_pli": 1, + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + { + "test_id": 8, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": 4, + "verify": "crc32c", + "dataplacement": "fdp", + "plids": 1, + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + ## fdp_pli_select/plid_select=roundrobin + { + "test_id": 9, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "number_ios": 4, + "verify": "crc32c", + "fdp": 1, + "fdp_pli": 1, + "fdp_pli_select": "roundrobin", + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + { + "test_id": 10, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": 4, + "verify": "crc32c", + "dataplacement": "fdp", + "plids": 1, + "plid_select": "roundrobin", + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + ## fdp_pli_select/plid_select=random + { + "test_id": 11, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "number_ios": 4, + "verify": "crc32c", + "fdp": 1, + "fdp_pli": 1, + "fdp_pli_select": "random", + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + { + "test_id": 12, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": 4, + "verify": "crc32c", + "dataplacement": "fdp", + "plids": 1, + "plid_select": "random", + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + # Just a regular write 
without FDP directive--should land on plid 0 + { + "test_id": 13, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": 19, + "verify": "crc32c", + "output-format": "json", + }, + "test_class": FDPSinglePLIDTest, + }, + # Read workload + { + "test_id": 14, + "fio_opts": { + "rw": 'randread', + "bs": 4096, + "number_ios": 19, + "output-format": "json", + }, + "test_class": FDPReadTest, + }, + # write to multiple PLIDs using round robin to select PLIDs + ## write to all PLIDs using old and new sets of options + { + "test_id": 100, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": "2*{nruhsd}+3", + "verify": "crc32c", + "fdp": 1, + "fdp_pli_select": "roundrobin", + "output-format": "json", + }, + "test_class": FDPMultiplePLIDTest, + }, + { + "test_id": 101, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": "2*{nruhsd}+3", + "verify": "crc32c", + "dataplacement": "fdp", + "plid_select": "roundrobin", + "output-format": "json", + }, + "test_class": FDPMultiplePLIDTest, + }, + ## write to a subset of PLIDs using old and new sets of options + { + "test_id": 102, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": "{nruhsd}+1", + "verify": "crc32c", + "fdp": 1, + "fdp_pli": "1,3", + "fdp_pli_select": "roundrobin", + "output-format": "json", + }, + "test_class": FDPMultiplePLIDTest, + }, + { + "test_id": 103, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": "{nruhsd}+1", + "verify": "crc32c", + "dataplacement": "fdp", + "plids": "1,3", + "plid_select": "roundrobin", + "output-format": "json", + }, + "test_class": FDPMultiplePLIDTest, + }, + # write to multiple PLIDs using random selection of PLIDs + ## write to all PLIDs using old and new sets of options + { + "test_id": 200, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": "{max_ruamw}-1", + "verify": "crc32c", + "fdp": 1, + "fdp_pli_select": "random", + "output-format": "json", + }, + "test_class": FDPMultiplePLIDTest, + }, + { + "test_id": 201, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": "{max_ruamw}-1", + "verify": "crc32c", + "dataplacement": "fdp", + "plid_select": "random", + "output-format": "json", + }, + "test_class": FDPMultiplePLIDTest, + }, + ## write to a subset of PLIDs using old and new sets of options + { + "test_id": 202, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": "{max_ruamw}-1", + "verify": "crc32c", + "fdp": 1, + "fdp_pli": "1,3,4", + "fdp_pli_select": "random", + "output-format": "json", + }, + "test_class": FDPMultiplePLIDTest, + }, + { + "test_id": 203, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "number_ios": "{max_ruamw}-1", + "verify": "crc32c", + "dataplacement": "fdp", + "plids": "1,3,4", + "plid_select": "random", + "output-format": "json", + }, + "test_class": FDPMultiplePLIDTest, + }, + # Specify invalid options fdp=1 and dataplacement=none + { + "test_id": 300, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 4096, + "verify": "crc32c", + "fdp": 1, + "fdp_pli": 3, + "output-format": "normal", + "dataplacement": "none", + }, + "test_class": FDPTest, + "success": SUCCESS_NONZERO, + }, + # Specify invalid options fdp=1 and dataplacement=streams + { + "test_id": 301, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 4096, + "verify": "crc32c", + "fdp": 1, + "fdp_pli": 3, + "output-format": "normal", + "dataplacement": "streams", + }, + "test_class": FDPTest, + "success": SUCCESS_NONZERO, + }, +] + +def parse_args(): + """Parse command-line 
arguments.""" + + parser = argparse.ArgumentParser() + parser.add_argument('-d', '--debug', help='Enable debug messages', action='store_true') + parser.add_argument('-f', '--fio', help='path to file executable (e.g., ./fio)') + parser.add_argument('-a', '--artifact-root', help='artifact root directory') + parser.add_argument('-s', '--skip', nargs='+', type=int, + help='list of test(s) to skip') + parser.add_argument('-o', '--run-only', nargs='+', type=int, + help='list of test(s) to run, skipping all others') + parser.add_argument('--dut', help='target NVMe character device to test ' + '(e.g., /dev/ng0n1). WARNING: THIS IS A DESTRUCTIVE TEST', required=True) + args = parser.parse_args() + + return args + + +FIO_FDP_MAX_RUAMW = 0 +FIO_FDP_NUMBER_PLIDS = 0 + +def main(): + """Run tests using fio's io_uring_cmd ioengine to send NVMe pass through commands.""" + global FIO_FDP_MAX_RUAMW + global FIO_FDP_NUMBER_PLIDS + + args = parse_args() + + if args.debug: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + artifact_root = args.artifact_root if args.artifact_root else \ + f"nvmept-fdp-test-{time.strftime('%Y%m%d-%H%M%S')}" + os.mkdir(artifact_root) + print(f"Artifact directory is {artifact_root}") + + if args.fio: + fio_path = str(Path(args.fio).absolute()) + else: + fio_path = 'fio' + print(f"fio path is {fio_path}") + + for test in TEST_LIST: + test['fio_opts']['filename'] = args.dut + + fdp_status = get_fdp_status(args.dut) + FIO_FDP_NUMBER_PLIDS = fdp_status['nruhsd'] + update_all_ruhs(args.dut) + FIO_FDP_MAX_RUAMW = check_all_ruhs(args.dut) + if not FIO_FDP_MAX_RUAMW: + sys.exit(-1) + + test_env = { + 'fio_path': fio_path, + 'fio_root': str(Path(__file__).absolute().parent.parent), + 'artifact_root': artifact_root, + 'basename': 'nvmept-fdp', + } + + _, failed, _ = run_fio_tests(TEST_LIST, test_env, args) + sys.exit(failed) + + +if __name__ == '__main__': + main() -- 2.25.1 From 1a3a21b73727358c8bb9b4a9762ce30acd9e492e Mon Sep 17 00:00:00 2001 From: Vincent Fu Date: Wed, 17 Jan 2024 15:19:07 +0000 Subject: [PATCH 05/16] fio: support NVMe streams Make small adjustments to the code supporting FDP to accommodate NVMe streams. Signed-off-by: Vincent Fu --- dataplacement.c | 16 +++++++++++++++- dataplacement.h | 7 ++++--- filesetup.c | 2 +- io_u.c | 2 +- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/dataplacement.c b/dataplacement.c index a7170863..a409b825 100644 --- a/dataplacement.c +++ b/dataplacement.c @@ -49,6 +49,20 @@ static int init_ruh_info(struct thread_data *td, struct fio_file *f) if (!ruhs) return -ENOMEM; + /* set up the data structure used for FDP to work with the supplied stream IDs */ + if (td->o.dp_type == FIO_DP_STREAMS) { + if (!td->o.dp_nr_ids) { + log_err("fio: stream IDs must be provided for dataplacement=streams\n"); + return -EINVAL; + } + ruhs->nr_ruhs = td->o.dp_nr_ids; + for (int i = 0; i < ruhs->nr_ruhs; i++) + ruhs->plis[i] = td->o.dp_ids[i]; + + f->ruhs_info = ruhs; + return 0; + } + ret = fdp_ruh_info(td, f, ruhs); if (ret) { log_info("fio: ruh info failed for %s (%d)\n", @@ -129,6 +143,6 @@ void dp_fill_dspec_data(struct thread_data *td, struct io_u *io_u) dspec = ruhs->plis[ruhs->pli_loc]; } - io_u->dtype = FDP_DIR_DTYPE; + io_u->dtype = td->o.dp_type == FIO_DP_FDP ? 
FDP_DIR_DTYPE : STREAMS_DIR_DTYPE; io_u->dspec = dspec; } diff --git a/dataplacement.h b/dataplacement.h index b6ceb5bc..b5718c86 100644 --- a/dataplacement.h +++ b/dataplacement.h @@ -3,9 +3,10 @@ #include "io_u.h" -#define FDP_DIR_DTYPE 2 -#define FDP_MAX_RUHS 128 -#define FIO_MAX_DP_IDS 16 +#define STREAMS_DIR_DTYPE 1 +#define FDP_DIR_DTYPE 2 +#define FDP_MAX_RUHS 128 +#define FIO_MAX_DP_IDS 16 /* * How fio chooses what placement identifier to use next. Choice of diff --git a/filesetup.c b/filesetup.c index 6fbfced5..cb42a852 100644 --- a/filesetup.c +++ b/filesetup.c @@ -1411,7 +1411,7 @@ done: td_restore_runstate(td, old_state); - if (td->o.dp_type == FIO_DP_FDP) { + if (td->o.dp_type != FIO_DP_NONE) { err = dp_init(td); if (err) goto err_out; diff --git a/io_u.c b/io_u.c index 86ad7424..a090e121 100644 --- a/io_u.c +++ b/io_u.c @@ -1065,7 +1065,7 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u) } } - if (td->o.dp_type == FIO_DP_FDP) + if (td->o.dp_type != FIO_DP_NONE) dp_fill_dspec_data(td, io_u); if (io_u->offset + io_u->buflen > io_u->file->real_file_size) { -- 2.25.1 From 3dabef7c3139835d4afada65fb3f7ac8609e42eb Mon Sep 17 00:00:00 2001 From: Vincent Fu Date: Thu, 4 Apr 2024 16:40:03 +0000 Subject: [PATCH 06/16] options: reject placement IDs larger than the max Placement IDs are a 16-bit value. So we should notify users if the provided placement IDs are too large. Signed-off-by: Vincent Fu --- options.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/options.c b/options.c index 7e589299..4065b7a0 100644 --- a/options.c +++ b/options.c @@ -270,8 +270,15 @@ static int str_fdp_pli_cb(void *data, const char *input) strip_blank_front(&str); strip_blank_end(str); - while ((v = strsep(&str, ",")) != NULL && i < FIO_MAX_DP_IDS) - td->o.dp_ids[i++] = strtoll(v, NULL, 0); + while ((v = strsep(&str, ",")) != NULL && i < FIO_MAX_DP_IDS) { + unsigned long long id = strtoll(v, NULL, 0); + if (id > 0xFFFF) { + log_err("Placement IDs cannot exceed 0xFFFF\n"); + free(p); + return 1; + } + td->o.dp_ids[i++] = id; + } free(p); qsort(td->o.dp_ids, i, sizeof(*td->o.dp_ids), fio_fdp_cmp); -- 2.25.1 From 33f5cb572c4b5856e18ff8705fe3e9e37da9e6bf Mon Sep 17 00:00:00 2001 From: Vincent Fu Date: Thu, 4 Apr 2024 16:46:21 +0000 Subject: [PATCH 07/16] options: parse placement IDs as unsigned values Signed-off-by: Vincent Fu --- options.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/options.c b/options.c index 4065b7a0..61ea41cc 100644 --- a/options.c +++ b/options.c @@ -271,7 +271,7 @@ static int str_fdp_pli_cb(void *data, const char *input) strip_blank_end(str); while ((v = strsep(&str, ",")) != NULL && i < FIO_MAX_DP_IDS) { - unsigned long long id = strtoll(v, NULL, 0); + unsigned long long id = strtoull(v, NULL, 0); if (id > 0xFFFF) { log_err("Placement IDs cannot exceed 0xFFFF\n"); free(p); -- 2.25.1 From 61d2213925323d4a88a5bf38be75122abb36c309 Mon Sep 17 00:00:00 2001 From: Vincent Fu Date: Thu, 4 Apr 2024 18:11:42 +0000 Subject: [PATCH 08/16] dataplacement: add a debug print for IOs This is useful for testing. Signed-off-by: Vincent Fu --- dataplacement.c | 1 + 1 file changed, 1 insertion(+) diff --git a/dataplacement.c b/dataplacement.c index a409b825..1d5b21ed 100644 --- a/dataplacement.c +++ b/dataplacement.c @@ -145,4 +145,5 @@ void dp_fill_dspec_data(struct thread_data *td, struct io_u *io_u) io_u->dtype = td->o.dp_type == FIO_DP_FDP ? 
FDP_DIR_DTYPE : STREAMS_DIR_DTYPE; io_u->dspec = dspec; + dprint(FD_IO, "dtype set to 0x%x, dspec set to 0x%x\n", io_u->dtype, io_u->dspec); } -- 2.25.1 From 0c8c808df1d16fc650acb90b94f41680dcd1a92c Mon Sep 17 00:00:00 2001 From: Vincent Fu Date: Fri, 5 Jan 2024 18:04:10 +0000 Subject: [PATCH 09/16] t/nvmept_streams: test NVMe streams support This test script uses the io_uring pass-through ioengine to test NVMe streams support. Signed-off-by: Vincent Fu --- t/nvmept_streams.py | 520 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 520 insertions(+) create mode 100755 t/nvmept_streams.py diff --git a/t/nvmept_streams.py b/t/nvmept_streams.py new file mode 100755 index 00000000..e5425506 --- /dev/null +++ b/t/nvmept_streams.py @@ -0,0 +1,520 @@ +#!/usr/bin/env python3 +# +# Copyright 2024 Samsung Electronics Co., Ltd All Rights Reserved +# +# For conditions of distribution and use, see the accompanying COPYING file. +# +""" +# nvmept_streams.py +# +# Test fio's NVMe streams support using the io_uring_cmd ioengine with NVMe +# pass-through commands. +# +# USAGE +# see python3 nvmept_streams.py --help +# +# EXAMPLES +# python3 t/nvmept_streams.py --dut /dev/ng0n1 +# python3 t/nvmept_streams.py --dut /dev/ng1n1 -f ./fio +# +# REQUIREMENTS +# Python 3.6 +# +# WARNING +# This is a destructive test +# +# Enable streams with +# nvme dir-send -D 0 -O 1 -e 1 -T 1 /dev/nvme0n1 +# +# See streams directive status with +# nvme dir-receive -D 0 -O 1 -H /dev/nvme0n1 +""" +import os +import sys +import time +import locale +import logging +import argparse +import subprocess +from pathlib import Path +from fiotestlib import FioJobCmdTest, run_fio_tests +from fiotestcommon import SUCCESS_NONZERO + + +class StreamsTest(FioJobCmdTest): + """ + NVMe pass-through test class for streams. Check to make sure output for + selected data direction(s) is non-zero and that zero data appears for other + directions. 
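+    Also check that the stream IDs found on the device match the IDs
+    the job was allowed to use.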
+ """ + + def setup(self, parameters): + """Setup a test.""" + + fio_args = [ + "--name=nvmept-streams", + "--ioengine=io_uring_cmd", + "--cmd_type=nvme", + "--randrepeat=0", + f"--filename={self.fio_opts['filename']}", + f"--rw={self.fio_opts['rw']}", + f"--output={self.filenames['output']}", + f"--output-format={self.fio_opts['output-format']}", + ] + for opt in ['fixedbufs', 'nonvectored', 'force_async', 'registerfiles', + 'sqthread_poll', 'sqthread_poll_cpu', 'hipri', 'nowait', + 'time_based', 'runtime', 'verify', 'io_size', 'num_range', + 'iodepth', 'iodepth_batch', 'iodepth_batch_complete', + 'size', 'rate', 'bs', 'bssplit', 'bsrange', 'randrepeat', + 'buffer_pattern', 'verify_pattern', 'offset', 'dataplacement', + 'plids', 'plid_select' ]: + if opt in self.fio_opts: + option = f"--{opt}={self.fio_opts[opt]}" + fio_args.append(option) + + super().setup(fio_args) + + + def check_result(self): + try: + self._check_result() + finally: + release_all_streams(self.fio_opts['filename']) + + + def _check_result(self): + + super().check_result() + + if 'rw' not in self.fio_opts or \ + not self.passed or \ + 'json' not in self.fio_opts['output-format']: + return + + job = self.json_data['jobs'][0] + + if self.fio_opts['rw'] in ['read', 'randread']: + self.passed = self.check_all_ddirs(['read'], job) + elif self.fio_opts['rw'] in ['write', 'randwrite']: + if 'verify' not in self.fio_opts: + self.passed = self.check_all_ddirs(['write'], job) + else: + self.passed = self.check_all_ddirs(['read', 'write'], job) + elif self.fio_opts['rw'] in ['trim', 'randtrim']: + self.passed = self.check_all_ddirs(['trim'], job) + elif self.fio_opts['rw'] in ['readwrite', 'randrw']: + self.passed = self.check_all_ddirs(['read', 'write'], job) + elif self.fio_opts['rw'] in ['trimwrite', 'randtrimwrite']: + self.passed = self.check_all_ddirs(['trim', 'write'], job) + else: + logging.error("Unhandled rw value %s", self.fio_opts['rw']) + self.passed = False + + if 'iodepth' in self.fio_opts: + # We will need to figure something out if any test uses an iodepth + # different from 8 + if job['iodepth_level']['8'] < 95: + logging.error("Did not achieve requested iodepth") + self.passed = False + else: + logging.debug("iodepth 8 target met %s", job['iodepth_level']['8']) + + stream_ids = [int(stream) for stream in self.fio_opts['plids'].split(',')] + if not self.check_streams(self.fio_opts['filename'], stream_ids): + self.passed = False + logging.error("Streams not as expected") + else: + logging.debug("Streams created as expected") + + + def check_streams(self, dut, stream_ids): + """ + Confirm that the specified stream IDs exist on the specified device. + """ + + id_list = get_device_stream_ids(dut) + if not id_list: + return False + + for stream in stream_ids: + if stream in id_list: + logging.debug("Stream ID %d found active on device", stream) + id_list.remove(stream) + else: + if self.__class__.__name__ != "StreamsTestRand": + logging.error("Stream ID %d not found on device", stream) + else: + logging.debug("Stream ID %d not found on device", stream) + return False + + if len(id_list) != 0: + logging.error("Extra stream IDs %s found on device", str(id_list)) + return False + + return True + + +class StreamsTestRR(StreamsTest): + """ + NVMe pass-through test class for streams. Check to make sure output for + selected data direction(s) is non-zero and that zero data appears for other + directions. Check that Stream IDs are accessed in round robin order. 
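+    With fewer writes than available stream IDs, only the first N
+    sorted stream IDs should appear on the device after N writes.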
+ """ + + def check_streams(self, dut, stream_ids): + """ + The number of IOs is less than the number of stream IDs provided. Let N + be the number of IOs. Make sure that the device only has the first N of + the stream IDs provided. + + This will miss some cases where some other selection algorithm happens + to select the first N stream IDs. The solution would be to repeat this + test multiple times. Multiple trials passing would be evidence that + round robin is working correctly. + """ + + id_list = get_device_stream_ids(dut) + if not id_list: + return False + + num_streams = int(self.fio_opts['io_size'] / self.fio_opts['bs']) + stream_ids = sorted(stream_ids)[0:num_streams] + + return super().check_streams(dut, stream_ids) + + +class StreamsTestRand(StreamsTest): + """ + NVMe pass-through test class for streams. Check to make sure output for + selected data direction(s) is non-zero and that zero data appears for other + directions. Check that Stream IDs are accessed in random order. + """ + + def check_streams(self, dut, stream_ids): + """ + The number of IOs is less than the number of stream IDs provided. Let N + be the number of IOs. Confirm that the stream IDs on the device are not + the first N stream IDs. + + This will produce false positives because it is possible for the first + N stream IDs to be randomly selected. We can reduce the probability of + false positives by increasing N and increasing the number of streams + IDs to choose from, although fio has a max of 16 placement IDs. + """ + + id_list = get_device_stream_ids(dut) + if not id_list: + return False + + num_streams = int(self.fio_opts['io_size'] / self.fio_opts['bs']) + stream_ids = sorted(stream_ids)[0:num_streams] + + return not super().check_streams(dut, stream_ids) + + +def get_device_stream_ids(dut): + cmd = f"sudo nvme dir-receive -D 1 -O 2 -H {dut}" + logging.debug("check streams command: %s", cmd) + cmd = cmd.split(' ') + cmd_result = subprocess.run(cmd, capture_output=True, check=False, + encoding=locale.getpreferredencoding()) + + logging.debug(cmd_result.stdout) + + if cmd_result.returncode != 0: + logging.error("Error obtaining device %s stream IDs: %s", dut, cmd_result.stderr) + return False + + id_list = [] + for line in cmd_result.stdout.split('\n'): + if not 'Stream Identifier' in line: + continue + tokens = line.split(':') + id_list.append(int(tokens[1])) + + return id_list + + +def release_stream(dut, stream_id): + """ + Release stream on given device with selected ID. + """ + cmd = f"nvme dir-send -D 1 -O 1 -S {stream_id} {dut}" + logging.debug("release stream command: %s", cmd) + cmd = cmd.split(' ') + cmd_result = subprocess.run(cmd, capture_output=True, check=False, + encoding=locale.getpreferredencoding()) + + if cmd_result.returncode != 0: + logging.error("Error releasing %s stream %d", dut, stream_id) + return False + + return True + + +def release_all_streams(dut): + """ + Release all streams on specified device. 
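+    Return True on success, False if the stream ID list cannot be read
+    or any release fails.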
+ """ + + id_list = get_device_stream_ids(dut) + if not id_list: + return False + + for stream in id_list: + if not release_stream(dut, stream): + return False + + return True + + +TEST_LIST = [ + # 4k block size + # {seq write, rand write} x {single stream, four streams} + { + "test_id": 1, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 256*1024*1024, + "verify": "crc32c", + "plids": "8", + "dataplacement": "streams", + "output-format": "json", + }, + "test_class": StreamsTest, + }, + { + "test_id": 2, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "io_size": 256*1024*1024, + "verify": "crc32c", + "plids": "3", + "dataplacement": "streams", + "output-format": "json", + }, + "test_class": StreamsTest, + }, + { + "test_id": 3, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 256*1024*1024, + "verify": "crc32c", + "plids": "1,2,3,4", + "dataplacement": "streams", + "output-format": "json", + }, + "test_class": StreamsTest, + }, + { + "test_id": 4, + "fio_opts": { + "rw": 'randwrite', + "bs": 4096, + "io_size": 256*1024*1024, + "verify": "crc32c", + "plids": "5,6,7,8", + "dataplacement": "streams", + "output-format": "json", + }, + "test_class": StreamsTest, + }, + # 256KiB block size + # {seq write, rand write} x {single stream, four streams} + { + "test_id": 10, + "fio_opts": { + "rw": 'write', + "bs": 256*1024, + "io_size": 256*1024*1024, + "verify": "crc32c", + "plids": "88", + "dataplacement": "streams", + "output-format": "json", + }, + "test_class": StreamsTest, + }, + { + "test_id": 11, + "fio_opts": { + "rw": 'randwrite', + "bs": 256*1024, + "io_size": 256*1024*1024, + "verify": "crc32c", + "plids": "20", + "dataplacement": "streams", + "output-format": "json", + }, + "test_class": StreamsTest, + }, + { + "test_id": 12, + "fio_opts": { + "rw": 'write', + "bs": 256*1024, + "io_size": 256*1024*1024, + "verify": "crc32c", + "plids": "16,32,64,128", + "dataplacement": "streams", + "output-format": "json", + }, + "test_class": StreamsTest, + }, + { + "test_id": 13, + "fio_opts": { + "rw": 'randwrite', + "bs": 256*1024, + "io_size": 256*1024*1024, + "verify": "crc32c", + "plids": "10,20,40,82", + "dataplacement": "streams", + "output-format": "json", + }, + "test_class": StreamsTest, + }, + # Test placement ID selection patterns + # default is round robin + { + "test_id": 20, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 8192, + "plids": '88,99,100,123,124,125,126,127,128,129,130,131,132,133,134,135', + "dataplacement": "streams", + "output-format": "json", + }, + "test_class": StreamsTestRR, + }, + { + "test_id": 21, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 8192, + "plids": '12,88,99,100,123,124,125,126,127,128,129,130,131,132,133,11', + "dataplacement": "streams", + "output-format": "json", + }, + "test_class": StreamsTestRR, + }, + # explicitly select round robin + { + "test_id": 22, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 8192, + "plids": '22,88,99,100,123,124,125,126,127,128,129,130,131,132,133,134', + "dataplacement": "streams", + "output-format": "json", + "plid_select": "roundrobin", + }, + "test_class": StreamsTestRR, + }, + # explicitly select random + { + "test_id": 23, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 8192, + "plids": '1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16', + "dataplacement": "streams", + "output-format": "json", + "plid_select": "random", + }, + "test_class": StreamsTestRand, + }, + # Error case with placement ID > 0xFFFF + { + "test_id": 30, + "fio_opts": { + "rw": 
'write', + "bs": 4096, + "io_size": 8192, + "plids": "1,2,3,0x10000", + "dataplacement": "streams", + "output-format": "normal", + "plid_select": "random", + }, + "test_class": StreamsTestRand, + "success": SUCCESS_NONZERO, + }, + # Error case with no stream IDs provided + { + "test_id": 31, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 8192, + "dataplacement": "streams", + "output-format": "normal", + }, + "test_class": StreamsTestRand, + "success": SUCCESS_NONZERO, + }, + +] + +def parse_args(): + """Parse command-line arguments.""" + + parser = argparse.ArgumentParser() + parser.add_argument('-d', '--debug', help='Enable debug messages', action='store_true') + parser.add_argument('-f', '--fio', help='path to file executable (e.g., ./fio)') + parser.add_argument('-a', '--artifact-root', help='artifact root directory') + parser.add_argument('-s', '--skip', nargs='+', type=int, + help='list of test(s) to skip') + parser.add_argument('-o', '--run-only', nargs='+', type=int, + help='list of test(s) to run, skipping all others') + parser.add_argument('--dut', help='target NVMe character device to test ' + '(e.g., /dev/ng0n1). WARNING: THIS IS A DESTRUCTIVE TEST', required=True) + args = parser.parse_args() + + return args + + +def main(): + """Run tests using fio's io_uring_cmd ioengine to send NVMe pass through commands.""" + + args = parse_args() + + if args.debug: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + artifact_root = args.artifact_root if args.artifact_root else \ + f"nvmept-streams-test-{time.strftime('%Y%m%d-%H%M%S')}" + os.mkdir(artifact_root) + print(f"Artifact directory is {artifact_root}") + + if args.fio: + fio_path = str(Path(args.fio).absolute()) + else: + fio_path = 'fio' + print(f"fio path is {fio_path}") + + for test in TEST_LIST: + test['fio_opts']['filename'] = args.dut + + release_all_streams(args.dut) + test_env = { + 'fio_path': fio_path, + 'fio_root': str(Path(__file__).absolute().parent.parent), + 'artifact_root': artifact_root, + 'basename': 'nvmept-streams', + } + + _, failed, _ = run_fio_tests(TEST_LIST, test_env, args) + sys.exit(failed) + + +if __name__ == '__main__': + main() -- 2.25.1 From 349bbcb2e36658db05d5247ef8cbcb285d58dfbf Mon Sep 17 00:00:00 2001 From: Vincent Fu Date: Wed, 17 Jan 2024 18:52:48 +0000 Subject: [PATCH 10/16] docs: update for new data placement options Update the HOWTO and man page for the unified data placement options that cover both FDP and Streams. Signed-off-by: Vincent Fu --- HOWTO.rst | 36 +++++++++++++++++++++++++++--------- fio.1 | 35 ++++++++++++++++++++++++++++------- 2 files changed, 55 insertions(+), 16 deletions(-) diff --git a/HOWTO.rst b/HOWTO.rst index 2f108e36..2f8ef6d4 100644 --- a/HOWTO.rst +++ b/HOWTO.rst @@ -2500,7 +2500,24 @@ with the caveat that when used on the command line, they must come after the Enable Flexible Data Placement mode for write commands. -.. option:: fdp_pli_select=str : [io_uring_cmd] [xnvme] +.. option:: dataplacement=str : [io_uring_cmd] [xnvme] + + Specifies the data placement directive type to use for write commands. + The following types are supported: + + **none** + Do not use a data placement directive. This is the + default. + + **fdp** + Use Flexible Data Placement directives for write + commands. This is equivalent to specifying + :option:`fdp` =1. + + **streams** + Use Streams directives for write commands. + +.. 
option:: plid_select=str, fdp_pli_select=str : [io_uring_cmd] [xnvme]
 
 	Defines how fio decides which placement ID to use next. The following
 	types are defined:
 
 		**random**
 			Choose a placement ID at random (uniform).
 
 		**roundrobin**
 			Round robin over available placement IDs. This is the
 			default.
 
-	The available placement ID index/indices is defined by the option
-	:option:`fdp_pli`.
+	The available placement IDs (indices) are defined by the option
+	:option:`plids`.
 
-.. option:: fdp_pli=str : [io_uring_cmd] [xnvme]
+.. option:: plids=str, fdp_pli=str : [io_uring_cmd] [xnvme]
 
-	Select which Placement ID Index/Indicies this job is allowed to use for
-	writes. By default, the job will cycle through all available Placement
-	IDs, so use this to isolate these identifiers to specific jobs. If you
-	want fio to use placement identifier only at indices 0, 2 and 5 specify
-	``fdp_pli=0,2,5``.
+	Select which Placement IDs (streams) or Placement ID Indices (FDP) this
+	job is allowed to use for writes. For FDP by default, the job will
+	cycle through all available Placement IDs, so use this to isolate these
+	identifiers to specific jobs. If you want fio to use FDP placement
+	identifiers only at indices 0, 2 and 5, specify ``plids=0,2,5``. For
+	streams this should be a comma-separated list of Stream IDs.
 
 .. option:: md_per_io_size=int : [io_uring_cmd] [xnvme]
 
diff --git a/fio.1 b/fio.1
index 5fd3603d..ee812494 100644
--- a/fio.1
+++ b/fio.1
@@ -2264,7 +2264,26 @@ file blocks are fully allocated and the disk request could be issued immediately
 .BI (io_uring_cmd,xnvme)fdp \fR=\fPbool
 Enable Flexible Data Placement mode for write commands.
 .TP
-.BI (io_uring_cmd,xnvme)fdp_pli_select \fR=\fPstr
+.BI (io_uring_cmd,xnvme)dataplacement \fR=\fPstr
+Specifies the data placement directive type to use for write commands. The
+following types are supported:
+.RS
+.RS
+.TP
+.B none
+Do not use a data placement directive. This is the default.
+.TP
+.B fdp
+Use Flexible Data Placement directives for write commands. This is equivalent
+to specifying \fBfdp\fR=1.
+.TP
+.B streams
+Use Streams directives for write commands.
+.TP
+.RE
+.RE
+.TP
+.BI (io_uring_cmd,xnvme)plid_select \fR=\fPstr ", fdp_pli_select" \fR=\fPstr
 Defines how fio decides which placement ID to use next. The following types
 are defined:
 .RS
 .RS
 .TP
@@ -2277,14 +2296,16 @@ Choose a placement ID at random (uniform).
 Round robin over available placement IDs. This is the default.
 .RE
 .P
-The available placement ID index/indices is defined by \fBfdp_pli\fR option.
+The available placement IDs (indices) are defined by the \fBplids\fR option.
 .RE
 .TP
-.BI (io_uring_cmd,xnvme)fdp_pli \fR=\fPstr
-Select which Placement ID Index/Indicies this job is allowed to use for writes.
-By default, the job will cycle through all available Placement IDs, so use this
-to isolate these identifiers to specific jobs. If you want fio to use placement
-identifier only at indices 0, 2 and 5 specify, you would set `fdp_pli=0,2,5`.
+.BI (io_uring_cmd,xnvme)plids \fR=\fPstr ", fdp_pli" \fR=\fPstr
+Select which Placement IDs (streams) or Placement ID Indices (FDP) this job is
+allowed to use for writes. For FDP by default, the job will cycle through all
+available Placement IDs, so use this to isolate these identifiers to specific
+jobs. If you want fio to use placement identifiers only at indices 0, 2 and 5,
+you would set `plids=0,2,5`. For streams this should be a
+comma-separated list of Stream IDs.
.TP
.BI (io_uring_cmd,xnvme)md_per_io_size \fR=\fPint
Size in bytes for separate metadata buffer per IO. Default: 0.
-- 2.25.1

From 6a9a9bd2c524a7ef58d2fb8181038408155902b8 Mon Sep 17 00:00:00 2001
From: Vincent Fu
Date: Thu, 25 Apr 2024 13:00:54 -0400
Subject: [PATCH 11/16] t/nvmept_trim: increase transfer size for some tests

The final sequence of tests uses a block size of 4096 bytes. This can be
slow enough on some platforms to trigger a 10-minute timeout. Increase
the block size to 256K to reduce the run time.

Signed-off-by: Vincent Fu
---
 t/nvmept_trim.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/t/nvmept_trim.py b/t/nvmept_trim.py
index 57568384..c990747d 100755
--- a/t/nvmept_trim.py
+++ b/t/nvmept_trim.py
@@ -470,6 +470,7 @@ TEST_LIST = [
             "output-format": 'json',
             "buffer_pattern": 0x0f,
             "size": 256*1024*1024,
+            "bs": 256*1024,
         },
         "test_class": TrimTest,
     },
@@ -481,6 +482,7 @@ TEST_LIST = [
             "verify_pattern": 0x0f,
             "verify": "pattern",
             "size": 256*1024*1024,
+            "bs": 256*1024,
         },
         "test_class": TrimTest,
     },
@@ -491,6 +493,7 @@ TEST_LIST = [
             "num_range": 8,
             "output-format": 'json',
             "size": 128*1024*1024,
+            "bs": 256*1024,
         },
         "test_class": TrimTest,
     },
@@ -511,6 +514,7 @@ TEST_LIST = [
             "verify_pattern": 0x0f,
             "verify": "pattern",
             "size": 128*1024*1024,
+            "bs": 256*1024,
         },
         "test_class": TrimTest,
         "success": SUCCESS_NONZERO,
@@ -524,6 +528,7 @@ TEST_LIST = [
             "verify": "pattern",
             "offset": 128*1024*1024,
             "size": 128*1024*1024,
+            "bs": 256*1024,
         },
         "test_class": TrimTest,
     },
-- 2.25.1

From 0c9c3a9b053b47543e38dd7242b9bfdf30f851e6 Mon Sep 17 00:00:00 2001
From: Shin'ichiro Kawasaki
Date: Tue, 30 Apr 2024 19:30:20 +0900
Subject: [PATCH 12/16] zbd: remove unnecessary verify_backlog check in
 zbd_file_reset()

Commit c5c8b92be5a2 ("zbd: fix zone reset condition for verify")
improved zbd_file_reset() so that it does not reset zones while data to
verify remains. To check for leftover verify data, it mirrored the logic
of check_get_verify(), including the check on the modulo operation
"td->io_hist_len % td->o.verify_backlog". That check is required in
check_get_verify() to know when to perform the verify backlog operation.
However, it is not required in zbd_file_reset(), since zone reset is
unrelated to the verify backlog timing.

The unnecessary modulo check allows zones to be reset even when
td->io_hist_len is non-zero and data to verify remains, which erases the
data to be verified and causes verify errors.

Fix this by removing the unnecessary check.
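To illustrate the failure mode, consider verify_backlog=8 after 16
writes have completed: 16 history entries still await verification, yet
the modulo evaluates to zero. A minimal sketch (illustrative Python with
hypothetical values, not code from this patch):

    # Old zbd_file_reset() condition, sketched with concrete numbers.
    io_hist_len = 16       # history entries still waiting for verification
    verify_backlog = 8

    verify_data_left = io_hist_len != 0                   # True: data remains
    if io_hist_len and verify_backlog:
        verify_data_left = io_hist_len % verify_backlog   # 16 % 8 == 0

    # verify_data_left is now 0 (falsy), so the zones would be reset and
    # the pending verify data erased even though io_hist_len is non-zero.
    print(bool(verify_data_left))   # False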
Fixes: c5c8b92be5a2 ("zbd: fix zone reset condition for verify")
Signed-off-by: Shin'ichiro Kawasaki
Link: https://lore.kernel.org/r/20240430103022.4136039-2-shinichiro.kawasaki@wdc.com
Signed-off-by: Vincent Fu
---
 zbd.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/zbd.c b/zbd.c
index 37417660..8a092cbe 100644
--- a/zbd.c
+++ b/zbd.c
@@ -1361,9 +1361,6 @@ void zbd_file_reset(struct thread_data *td, struct fio_file *f)
 	if (td->o.verify != VERIFY_NONE) {
 		verify_data_left = td->runstate == TD_VERIFYING ||
 			td->io_hist_len || td->verify_batch;
-		if (td->io_hist_len && td->o.verify_backlog)
-			verify_data_left =
-				td->io_hist_len % td->o.verify_backlog;
 		if (!verify_data_left)
 			zbd_reset_zones(td, f, zb, ze);
 	}
-- 2.25.1

From 06eb4c1f131b667b0ffbaf167af34b5f04691554 Mon Sep 17 00:00:00 2001
From: Shin'ichiro Kawasaki
Date: Tue, 30 Apr 2024 19:30:21 +0900
Subject: [PATCH 13/16] t/zbd: add test case to confirm verify_backlog=1 option

The previous commit fixed the verify failure caused by zone resets when
the verify_backlog option is used. Add a test case to confirm the fix.

Signed-off-by: Shin'ichiro Kawasaki
Link: https://lore.kernel.org/r/20240430103022.4136039-3-shinichiro.kawasaki@wdc.com
Signed-off-by: Vincent Fu
---
 t/zbd/test-zbd-support | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/t/zbd/test-zbd-support b/t/zbd/test-zbd-support
index c27d2ad6..ef98835c 100755
--- a/t/zbd/test-zbd-support
+++ b/t/zbd/test-zbd-support
@@ -1593,6 +1593,18 @@ test68() {
 	[[ $(grep -c "WRITE:" "${logfile}.${test_number}") == 1 ]]
 }
 
+# Test rw=rw and verify_backlog=1 options do not cause verify failure
+test69() {
+	require_zbd || return "$SKIP_TESTCASE"
+
+	prep_write
+	run_fio --name=job --filename="$dev" --time_based --runtime=15s \
+		--rw=rw --offset=$((first_sequential_zone_sector * 512)) \
+		"$(ioengine "libaio")" --iodepth=32 --randrepeat=0 \
+		--verify=crc32 --verify_backlog=1 --zonemode=zbd --direct=1 \
+		>> "${logfile}.${test_number}" 2>&1 || return $?
+}
+
 SECONDS=0
 tests=()
 dynamic_analyzer=()
-- 2.25.1

From 3ed8eea0ee9b5d7de603e9b128e6c05941b99938 Mon Sep 17 00:00:00 2001
From: Shin'ichiro Kawasaki
Date: Tue, 30 Apr 2024 19:30:22 +0900
Subject: [PATCH 14/16] t/zbd: avoid test case 31 failure with small devices

The test case assumed that the test target devices have 128 or more
sequential write required zones and used 128 as the minimum number of
zones to write. This caused failures when a device had fewer sequential
write required zones.

To avoid the failure, count the actual number of sequential write
required zones and use it when it is smaller than 128.

Signed-off-by: Shin'ichiro Kawasaki
Link: https://lore.kernel.org/r/20240430103022.4136039-4-shinichiro.kawasaki@wdc.com
Signed-off-by: Vincent Fu
---
 t/zbd/test-zbd-support | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/t/zbd/test-zbd-support b/t/zbd/test-zbd-support
index ef98835c..e0b2a755 100755
--- a/t/zbd/test-zbd-support
+++ b/t/zbd/test-zbd-support
@@ -852,12 +852,15 @@ test31() {
 	# To distribute the write target zones evenly, skip certain zones for every
 	# write. Utilize zonemode strided for such write patterns.
bs=$((128 * 1024))
+	off=$((first_sequential_zone_sector * 512))
+	size=$((disk_size - off))
 	nz=$((max_open_zones))
 	if [[ $nz -eq 0 ]]; then
 		nz=128
 	fi
-	off=$((first_sequential_zone_sector * 512))
-	size=$((disk_size - off))
+	if ((size / zone_size < nz)); then
+		nz=$((size / zone_size))
+	fi
 	inc=$(((size / nz / zone_size) * zone_size))
 	opts=("--name=$dev" "--filename=$dev" "--rw=write" "--bs=${bs}")
 	opts+=("--offset=$off" "--size=$((inc * nz))" "--io_size=$((bs * nz))")
-- 2.25.1

From 04d5add8452b55b89c39625d9139511584fd4c21 Mon Sep 17 00:00:00 2001
From: Hyunwoo Park
Date: Tue, 30 Apr 2024 06:58:12 +0000
Subject: [PATCH 15/16] fdp: support scheme placement id (index) selection

Add a new placement ID selection method called scheme. It allows users
to assign a placement ID (index) depending on the offset range. The
scheme is specified in a file by the user and is applied using the
option dp_scheme.

Signed-off-by: Hyunwoo Park
---
 HOWTO.rst        | 28 +++++++++++++++--
 cconv.c          |  2 ++
 dataplacement.c  | 78 ++++++++++++++++++++++++++++++++++++++++++++++++
 dataplacement.h  | 14 ++++++++-
 file.h           |  1 +
 fio.1            | 32 +++++++++++++++++++-
 options.c        | 52 ++++++++++++++++++++++++++++++++
 server.h         |  2 +-
 thread_options.h |  2 ++
 9 files changed, 206 insertions(+), 5 deletions(-)

diff --git a/HOWTO.rst b/HOWTO.rst
index 2f8ef6d4..3b262fae 100644
--- a/HOWTO.rst
+++ b/HOWTO.rst
@@ -2529,8 +2529,12 @@ with the caveat that when used on the command line, they must come after the
 		Round robin over available placement IDs. This is the
 		default.
 
-	The available placement IDs (indices) are defined by the option
-	:option:`plids`.
+	**scheme**
+		Choose a placement ID (index) based on the scheme file defined by
+		the option :option:`dp_scheme`.
+
+	The available placement IDs (indices) are defined by the option
+	:option:`fdp_pli` or :option:`plids`, except in the case of **scheme**.
 
 .. option:: plids=str, fdp_pli=str : [io_uring_cmd] [xnvme]
 
@@ -2541,6 +2545,26 @@ with the caveat that when used on the command line, they must come after the
 	identifiers only at indices 0, 2 and 5, specify ``plids=0,2,5``. For
 	streams this should be a comma-separated list of Stream IDs.
 
+.. option:: dp_scheme=str : [io_uring_cmd] [xnvme]
+
+	Defines which placement ID (index) is selected based on the offset
+	(LBA) range. The file should contain one or more scheme entries in the
+	following format:
+
+		0, 10737418240, 0
+		10737418240, 21474836480, 1
+		21474836480, 32212254720, 2
+		...
+
+	Each line, a scheme entry, contains a start offset, an end offset, and
+	a placement ID (index), separated by commas. If the write offset is
+	within the range of a scheme entry (start offset ≤ offset < end
+	offset), the corresponding placement ID (index) will be selected. If
+	the write offset belongs to multiple scheme entries, the first matching
+	scheme entry will be applied. If the offset is not within the range of
+	any scheme entry, the dspec field will be set to 0 (the default RUH).
+	(Caution: if a job has multiple devices, all of them will be affected
+	by the scheme. If this option is specified, the option :option:`plids`
+	or :option:`fdp_pli` will be ignored.)
+
 .. option:: md_per_io_size=int : [io_uring_cmd] [xnvme]
 
 	Size in bytes for separate metadata buffer per IO. Default: 0.
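To make the documented selection rule concrete, here is a minimal sketch
(illustrative Python, not fio code; the helper name select_plid is
hypothetical) of how a write offset resolves to a placement ID under the
example scheme above:

    # First matching [start, end) entry wins; unmatched offsets map to 0,
    # the default RUH, exactly as the dp_scheme documentation describes.
    def select_plid(scheme_lines, offset):
        for line in scheme_lines:
            start, end, pli = (int(x) for x in line.split(','))
            if start <= offset < end:
                return pli
        return 0  # dspec falls back to 0, the default RUH

    scheme = ["0, 10737418240, 0",
              "10737418240, 21474836480, 1",
              "21474836480, 32212254720, 2"]

    assert select_plid(scheme, 5 * 2**30) == 0    # 5 GiB: first entry
    assert select_plid(scheme, 15 * 2**30) == 1   # 15 GiB: second entry
    assert select_plid(scheme, 40 * 2**30) == 0   # unmatched: default RUH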
diff --git a/cconv.c b/cconv.c index 16112248..9b344940 100644 --- a/cconv.c +++ b/cconv.c @@ -94,6 +94,7 @@ int convert_thread_options_to_cpu(struct thread_options *o, string_to_cpu(&o->ioscheduler, top->ioscheduler); string_to_cpu(&o->profile, top->profile); string_to_cpu(&o->cgroup, top->cgroup); + string_to_cpu(&o->dp_scheme_file, top->dp_scheme_file); o->allow_create = le32_to_cpu(top->allow_create); o->allow_mounted_write = le32_to_cpu(top->allow_mounted_write); @@ -398,6 +399,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top, string_to_net(top->ioscheduler, o->ioscheduler); string_to_net(top->profile, o->profile); string_to_net(top->cgroup, o->cgroup); + string_to_net(top->dp_scheme_file, o->dp_scheme_file); top->allow_create = cpu_to_le32(o->allow_create); top->allow_mounted_write = cpu_to_le32(o->allow_mounted_write); diff --git a/dataplacement.c b/dataplacement.c index 1d5b21ed..8a4c8e64 100644 --- a/dataplacement.c +++ b/dataplacement.c @@ -100,6 +100,56 @@ out: return ret; } +static int init_ruh_scheme(struct thread_data *td, struct fio_file *f) +{ + struct fio_ruhs_scheme *ruh_scheme; + FILE *scheme_fp; + unsigned long long start, end; + uint16_t pli; + int ret = 0; + + if (td->o.dp_id_select != FIO_DP_SCHEME) + return 0; + + /* Get the scheme from the file */ + scheme_fp = fopen(td->o.dp_scheme_file, "r"); + + if (!scheme_fp) { + log_err("fio: ruh scheme failed to open scheme file %s\n", + td->o.dp_scheme_file); + ret = -errno; + goto out; + } + + ruh_scheme = scalloc(1, sizeof(*ruh_scheme)); + if (!ruh_scheme) { + ret = -ENOMEM; + goto out_with_close_fp; + } + + for (int i = 0; + i < DP_MAX_SCHEME_ENTRIES && fscanf(scheme_fp, "%llu,%llu,%hu\n", &start, &end, &pli) == 3; + i++) { + + ruh_scheme->scheme_entries[i].start_offset = start; + ruh_scheme->scheme_entries[i].end_offset = end; + ruh_scheme->scheme_entries[i].pli = pli; + ruh_scheme->nr_schemes++; + } + + if (fscanf(scheme_fp, "%llu,%llu,%hu\n", &start, &end, &pli) == 3) + log_info("fio: too many scheme entries in %s. 
Only the first %d scheme entries are applied\n",
+			 td->o.dp_scheme_file,
+			 DP_MAX_SCHEME_ENTRIES);
+
+	f->ruhs_scheme = ruh_scheme;
+
+out_with_close_fp:
+	fclose(scheme_fp);
+out:
+	return ret;
+}
+
 int dp_init(struct thread_data *td)
 {
 	struct fio_file *f;
@@ -109,6 +159,10 @@ int dp_init(struct thread_data *td)
 		ret = init_ruh_info(td, f);
 		if (ret)
 			break;
+
+		ret = init_ruh_scheme(td, f);
+		if (ret)
+			break;
 	}
 	return ret;
 }
@@ -119,6 +173,11 @@ void fdp_free_ruhs_info(struct fio_file *f)
 		return;
 	sfree(f->ruhs_info);
 	f->ruhs_info = NULL;
+
+	if (!f->ruhs_scheme)
+		return;
+	sfree(f->ruhs_scheme);
+	f->ruhs_scheme = NULL;
 }
 
 void dp_fill_dspec_data(struct thread_data *td, struct io_u *io_u)
@@ -138,6 +197,25 @@ void dp_fill_dspec_data(struct thread_data *td, struct io_u *io_u)
 			ruhs->pli_loc = 0;
 
 		dspec = ruhs->plis[ruhs->pli_loc++];
+	} else if (td->o.dp_id_select == FIO_DP_SCHEME) {
+		struct fio_ruhs_scheme *ruhs_scheme = f->ruhs_scheme;
+		unsigned long long offset = io_u->offset;
+		int i;
+
+		for (i = 0; i < ruhs_scheme->nr_schemes; i++) {
+			if (offset >= ruhs_scheme->scheme_entries[i].start_offset &&
+			    offset < ruhs_scheme->scheme_entries[i].end_offset) {
+				dspec = ruhs_scheme->scheme_entries[i].pli;
+				break;
+			}
+		}
+
+		/*
+		 * If the write offset is not matched by any scheme entry,
+		 * 0 (the default RUH) will be assigned to dspec.
+		 */
+		if (i == ruhs_scheme->nr_schemes)
+			dspec = 0;
 	} else {
 		ruhs->pli_loc = rand_between(&td->fdp_state, 0, ruhs->nr_ruhs - 1);
 		dspec = ruhs->plis[ruhs->pli_loc];
diff --git a/dataplacement.h b/dataplacement.h
index b5718c86..71d19d69 100644
--- a/dataplacement.h
+++ b/dataplacement.h
@@ -7,6 +7,7 @@
 #define FDP_DIR_DTYPE	2
 #define FDP_MAX_RUHS	128
 #define FIO_MAX_DP_IDS	16
+#define DP_MAX_SCHEME_ENTRIES	32
 
 /*
  * How fio chooses what placement identifier to use next. Choice of
@@ -15,9 +16,9 @@
 enum {
 	FIO_DP_RANDOM	= 0x1,
 	FIO_DP_RR	= 0x2,
+	FIO_DP_SCHEME	= 0x3,
 };
-
 enum {
 	FIO_DP_NONE	= 0x0,
 	FIO_DP_FDP	= 0x1,
@@ -30,6 +31,17 @@ struct fio_ruhs_info {
 	uint16_t plis[];
 };
 
+struct fio_ruhs_scheme_entry {
+	unsigned long long start_offset;
+	unsigned long long end_offset;
+	uint16_t pli;
+};
+
+struct fio_ruhs_scheme {
+	uint16_t nr_schemes;
+	struct fio_ruhs_scheme_entry scheme_entries[DP_MAX_SCHEME_ENTRIES];
+};
+
 int dp_init(struct thread_data *td);
 void fdp_free_ruhs_info(struct fio_file *f);
 void dp_fill_dspec_data(struct thread_data *td, struct io_u *io_u);
diff --git a/file.h b/file.h
index deb36e02..e38ed2f1 100644
--- a/file.h
+++ b/file.h
@@ -103,6 +103,7 @@ struct fio_file {
 	uint64_t io_size;
 
 	struct fio_ruhs_info *ruhs_info;
+	struct fio_ruhs_scheme *ruhs_scheme;
 
 	/*
 	 * Zoned block device information. See also zonemode=zbd.
diff --git a/fio.1 b/fio.1
index ee812494..1c8e3a56 100644
--- a/fio.1
+++ b/fio.1
@@ -2294,9 +2294,14 @@ Choose a placement ID at random (uniform).
 .TP
 .B roundrobin
 Round robin over available placement IDs. This is the default.
+.TP
+.B scheme
+Choose a placement ID (index) based on the scheme file defined by
+the option \fBdp_scheme\fP.
 .RE
 .P
-The available placement IDs (indices) are defined by the \fBplids\fR option.
+The available placement IDs (indices) are defined by the \fBplids\fR or
+\fBfdp_pli\fR option, except in the case of \fBscheme\fP.
 .RE
 .TP
 .BI (io_uring_cmd,xnvme)plids=str, fdp_pli \fR=\fPstr
 Select which Placement IDs (streams) or Placement ID Indices (FDP) this job is
 allowed to use for writes. For FDP by default, the job will cycle through all
 available Placement IDs, so use this to isolate these identifiers to specific
 jobs. If you want fio to use placement identifiers only at indices 0, 2 and 5,
 you would set `plids=0,2,5`. For streams this should be a
 comma-separated list of Stream IDs.
.TP
+.BI (io_uring_cmd,xnvme)\fR\fBdp_scheme\fP=str
+Defines which placement ID (index) is selected based on the offset (LBA) range.
+The file should contain one or more scheme entries in the following format:
+.sp
+.RS
+.RS
+0, 10737418240, 0
+.br
+10737418240, 21474836480, 1
+.br
+21474836480, 32212254720, 2
+.br
+\&...
+.RE
+.sp
+Each line, a scheme entry, contains a start offset, an end offset, and a
+placement ID (index), separated by commas. If the write offset is within the
+range of a scheme entry (start offset ≤ offset < end offset), the
+corresponding placement ID (index) will be selected. If the write offset
+belongs to multiple scheme entries, the first matching scheme entry will be
+applied. If the offset is not within the range of any scheme entry, the dspec
+field will be set to 0 (the default RUH). (Caution: if a job has multiple
+devices, all of them will be affected by the scheme. If this option is
+specified, the option \fBplids\fP or \fBfdp_pli\fP will be ignored.)
+.RE
+.TP
 .BI (io_uring_cmd,xnvme)md_per_io_size \fR=\fPint
 Size in bytes for separate metadata buffer per IO. Default: 0.
 .TP
diff --git a/options.c b/options.c
index 61ea41cc..f5d221c7 100644
--- a/options.c
+++ b/options.c
@@ -287,6 +287,43 @@ static int str_fdp_pli_cb(void *data, const char *input)
 	return 0;
 }
 
+/*
+ * str_dp_scheme_cb() is a callback function for parsing the dp_scheme option.
+ * It validates the dp_scheme file name.
+ */
+static int str_dp_scheme_cb(void *data, const char *input)
+{
+	struct thread_data *td = cb_data_to_td(data);
+	struct stat sb;
+	char *filename;
+	int ret = 0;
+
+	if (parse_dryrun())
+		return 0;
+
+	filename = strdup(td->o.dp_scheme_file);
+	strip_blank_front(&filename);
+	strip_blank_end(filename);
+
+	strcpy(td->o.dp_scheme_file, filename);
+
+	if (lstat(filename, &sb) < 0) {
+		ret = errno;
+		log_err("fio: lstat() error related to %s\n", filename);
+		td_verror(td, ret, "lstat");
+		goto out;
+	}
+
+	if (!S_ISREG(sb.st_mode)) {
+		ret = errno;
+		log_err("fio: %s is not a file\n", filename);
+		td_verror(td, ret, "S_ISREG");
+		goto out;
+	}
+
+out:
+	free(filename);
+	return ret;
+}
+
 static int str_bssplit_cb(void *data, const char *input)
 {
 	struct thread_data *td = cb_data_to_td(data);
@@ -3760,6 +3797,10 @@ struct fio_option fio_options[FIO_MAX_OPTS] = {
 			    .oval = FIO_DP_RR,
 			    .help = "Round robin select Placement IDs",
 			  },
+			  { .ival = "scheme",
+			    .oval = FIO_DP_SCHEME,
+			    .help = "Use a scheme (based on LBA) to select Placement IDs",
+			  },
 		},
 	},
 	{
@@ -3774,6 +3815,17 @@ struct fio_option fio_options[FIO_MAX_OPTS] = {
 		.category = FIO_OPT_C_IO,
 		.group	= FIO_OPT_G_INVALID,
 	},
+	{
+		.name	= "dp_scheme",
+		.lname	= "Data Placement Scheme",
+		.type	= FIO_OPT_STR_STORE,
+		.cb	= str_dp_scheme_cb,
+		.off1	= offsetof(struct thread_options, dp_scheme_file),
+		.maxlen	= PATH_MAX,
+		.help	= "scheme file that specifies offset-RUH mapping",
+		.category = FIO_OPT_C_IO,
+		.group	= FIO_OPT_G_INVALID,
+	},
 	{
 		.name	= "lockmem",
 		.lname	= "Lock memory",
diff --git a/server.h b/server.h
index 83ce449b..e8659f79 100644
--- a/server.h
+++ b/server.h
@@ -51,7 +51,7 @@ struct fio_net_cmd_reply {
 };
 
 enum {
-	FIO_SERVER_VER			= 104,
+	FIO_SERVER_VER			= 105,
 
 	FIO_SERVER_MAX_FRAGMENT_PDU	= 1024,
 	FIO_SERVER_MAX_CMD_MB		= 2048,
diff --git a/thread_options.h b/thread_options.h
index a36b7909..ccd0c064 100644
--- a/thread_options.h
+++ b/thread_options.h
@@ -396,6 +396,7 @@ struct thread_options {
 	unsigned int dp_id_select;
 	unsigned int dp_ids[FIO_MAX_DP_IDS];
 	unsigned int dp_nr_ids;
+	char *dp_scheme_file;
unsigned int log_entries; unsigned int log_prio; @@ -713,6 +714,7 @@ struct thread_options_pack { uint32_t dp_id_select; uint32_t dp_ids[FIO_MAX_DP_IDS]; uint32_t dp_nr_ids; + uint8_t dp_scheme_file[FIO_TOP_STR_MAX]; uint32_t num_range; /* -- 2.25.1 From 96566b090025fb35d5ef12322ad9ea0c573f1a90 Mon Sep 17 00:00:00 2001 From: Hyunwoo Park Date: Thu, 9 May 2024 04:16:37 +0000 Subject: [PATCH 16/16] t/nvmept_fdp: add tests(302,303,400,401) for fdp scheme - 302/303: invalid options tests - 400/401: check whether fdp scheme works properly Signed-off-by: Hyunwoo Park --- t/nvmept_fdp.py | 131 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 123 insertions(+), 8 deletions(-) diff --git a/t/nvmept_fdp.py b/t/nvmept_fdp.py index 031b439c..d6a543f2 100755 --- a/t/nvmept_fdp.py +++ b/t/nvmept_fdp.py @@ -56,6 +56,7 @@ class FDPTest(FioJobCmdTest): f"--output={self.filenames['output']}", f"--output-format={self.fio_opts['output-format']}", ] + for opt in ['fixedbufs', 'nonvectored', 'force_async', 'registerfiles', 'sqthread_poll', 'sqthread_poll_cpu', 'hipri', 'nowait', 'time_based', 'runtime', 'verify', 'io_size', 'num_range', @@ -63,7 +64,7 @@ class FDPTest(FioJobCmdTest): 'size', 'rate', 'bs', 'bssplit', 'bsrange', 'randrepeat', 'buffer_pattern', 'verify_pattern', 'offset', 'fdp', 'fdp_pli', 'fdp_pli_select', 'dataplacement', 'plid_select', - 'plids', 'number_ios']: + 'plids', 'dp_scheme', 'number_ios']: if opt in self.fio_opts: option = f"--{opt}={self.fio_opts[opt]}" fio_args.append(option) @@ -91,19 +92,20 @@ class FDPTest(FioJobCmdTest): return job = self.json_data['jobs'][0] + rw_fio_opts = self.fio_opts['rw'].split(':')[0] - if self.fio_opts['rw'] in ['read', 'randread']: + if rw_fio_opts in ['read', 'randread']: self.passed = self.check_all_ddirs(['read'], job) - elif self.fio_opts['rw'] in ['write', 'randwrite']: + elif rw_fio_opts in ['write', 'randwrite']: if 'verify' not in self.fio_opts: self.passed = self.check_all_ddirs(['write'], job) else: self.passed = self.check_all_ddirs(['read', 'write'], job) - elif self.fio_opts['rw'] in ['trim', 'randtrim']: + elif rw_fio_opts in ['trim', 'randtrim']: self.passed = self.check_all_ddirs(['trim'], job) - elif self.fio_opts['rw'] in ['readwrite', 'randrw']: + elif rw_fio_opts in ['readwrite', 'randrw']: self.passed = self.check_all_ddirs(['read', 'write'], job) - elif self.fio_opts['rw'] in ['trimwrite', 'randtrimwrite']: + elif rw_fio_opts in ['trimwrite', 'randtrimwrite']: self.passed = self.check_all_ddirs(['trim', 'write'], job) else: logging.error("Unhandled rw value %s", self.fio_opts['rw']) @@ -128,12 +130,25 @@ class FDPMultiplePLIDTest(FDPTest): mapping = { 'nruhsd': FIO_FDP_NUMBER_PLIDS, 'max_ruamw': FIO_FDP_MAX_RUAMW, + # parameters for 400, 401 tests + 'hole_size': 64*1024, + 'nios_for_scheme': FIO_FDP_NUMBER_PLIDS//2, } if 'number_ios' in self.fio_opts and isinstance(self.fio_opts['number_ios'], str): self.fio_opts['number_ios'] = eval(self.fio_opts['number_ios'].format(**mapping)) + if 'bs' in self.fio_opts and isinstance(self.fio_opts['bs'], str): + self.fio_opts['bs'] = eval(self.fio_opts['bs'].format(**mapping)) + if 'rw' in self.fio_opts and isinstance(self.fio_opts['rw'], str): + self.fio_opts['rw'] = self.fio_opts['rw'].format(**mapping) super().setup(parameters) - + + if 'dp_scheme' in self.fio_opts: + scheme_path = os.path.join(self.paths['test_dir'], self.fio_opts['dp_scheme']) + with open(scheme_path, mode='w') as f: + for i in range(mapping['nios_for_scheme']): + f.write(f'{mapping["hole_size"] * 2 * i}, 
{mapping["hole_size"] * 2 * (i+1)}, {i}\n') + def _check_result(self): if 'fdp_pli' in self.fio_opts: plid_list = self.fio_opts['fdp_pli'].split(',') @@ -157,10 +172,12 @@ class FDPMultiplePLIDTest(FDPTest): self._check_robin(plid_list, fdp_status) elif select == "random": self._check_random(plid_list, fdp_status) + elif select == "scheme": + self._check_scheme(plid_list, fdp_status) else: logging.error("Unknown plid selection strategy %s", select) self.passed = False - + super()._check_result() def _check_robin(self, plid_list, fdp_status): @@ -220,6 +237,42 @@ class FDPMultiplePLIDTest(FDPTest): logging.debug("Observed expected ruamw %d for idx %d, pid %d", ruhs['ruamw'], idx, ruhs['pid']) + def _check_scheme(self, plid_list, fdp_status): + """ + With scheme selection, a set of PLIDs touched by the scheme + """ + + PLID_IDX_POS = 2 + plid_list_from_scheme = set() + + scheme_path = os.path.join(self.paths['test_dir'], self.fio_opts['dp_scheme']) + + with open(scheme_path) as f: + lines = f.readlines() + for line in lines: + line_elem = line.strip().replace(' ', '').split(',') + plid_list_from_scheme.add(int(line_elem[PLID_IDX_POS])) + + logging.debug(f'plid_list_from_scheme: {plid_list_from_scheme}') + + for idx, ruhs in enumerate(fdp_status['ruhss']): + if ruhs['pid'] in plid_list_from_scheme: + if ruhs['ruamw'] == FIO_FDP_MAX_RUAMW: + logging.error("pid %d should be touched by the scheme. But ruamw of it(%d) equals to %d", + ruhs['pid'], ruhs['ruamw'], FIO_FDP_MAX_RUAMW) + self.passed = False + else: + logging.debug("pid %d should be touched by the scheme. ruamw of it(%d) is under %d", + ruhs['pid'], ruhs['ruamw'], FIO_FDP_MAX_RUAMW) + else: + if ruhs['ruamw'] == FIO_FDP_MAX_RUAMW: + logging.debug("pid %d should not be touched by the scheme. ruamw of it(%d) equals to %d", + ruhs['pid'], ruhs['ruamw'], FIO_FDP_MAX_RUAMW) + else: + logging.error("pid %d should not be touched by the scheme. But ruamw of it(%d) is under %d", + ruhs['pid'], ruhs['ruamw'], FIO_FDP_MAX_RUAMW) + self.passed = False + class FDPSinglePLIDTest(FDPTest): """ @@ -674,6 +727,68 @@ TEST_LIST = [ "test_class": FDPTest, "success": SUCCESS_NONZERO, }, + # Specify invalid options related to dataplacement scheme + ## using old and new sets of options + { + "test_id": 302, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 4096, + "verify": "crc32c", + "fdp": 1, + "fdp_pli": 3, + "fdp_pli_select": "scheme", + "output-format": "normal", + }, + "test_class": FDPTest, + "success": SUCCESS_NONZERO, + }, + { + "test_id": 303, + "fio_opts": { + "rw": 'write', + "bs": 4096, + "io_size": 4096, + "verify": "crc32c", + "dataplacement": "fdp", + "plids": 3, + "plid_select": "scheme", + "output-format": "normal", + }, + "test_class": FDPTest, + "success": SUCCESS_NONZERO, + }, + # write to multiple PLIDs using scheme selection of PLIDs + ## using old and new sets of options + { + "test_id": 400, + "fio_opts": { + "rw": "write:{hole_size}", + "bs": "{hole_size}", + "number_ios": "{nios_for_scheme}", + "verify": "crc32c", + "fdp": 1, + "fdp_pli_select": "scheme", + "dp_scheme": "lba.scheme", + "output-format": "json", + }, + "test_class": FDPMultiplePLIDTest, + }, + { + "test_id": 401, + "fio_opts": { + "rw": "write:{hole_size}", + "bs": "{hole_size}", + "number_ios": "{nios_for_scheme}", + "verify": "crc32c", + "dataplacement": "fdp", + "plid_select": "scheme", + "dp_scheme": "lba.scheme", + "output-format": "json", + }, + "test_class": FDPMultiplePLIDTest, + }, ] def parse_args(): -- 2.25.1