GtkWidget *box;
unsigned int opt_index;
unsigned int opt_type;
+ gulong sig_handler;
};
struct gopt_combo {
struct gopt_int {
struct gopt gopt;
+ unsigned int lastval;
GtkWidget *spin;
};
GtkWidget *spins[GOPT_RANGE_SPIN];
};
-struct gopt_widget {
- struct flist_head list;
- GtkWidget *widget;
+struct gopt_str_multi {
+ struct gopt gopt;
+ GtkWidget *checks[PARSE_MAX_VP];
};
-static struct flist_head gopt_list[FIO_MAX_OPTS];
+static GtkWidget *gopt_widgets[FIO_MAX_OPTS];
-static void __gopt_set_children_visible(unsigned int idx, gboolean visible)
-{
- struct flist_head *entry;
- struct gopt_widget *gw;
+struct gopt_frame_widget {
+ GtkWidget *vbox[2];
+ unsigned int nr;
+};
+static struct gopt_frame_widget gopt_g_widgets[__FIO_OPT_G_NR];
- flist_for_each(entry, &gopt_list[idx]) {
- gw = flist_entry(entry, struct gopt_widget, list);
- gtk_widget_set_sensitive(gw->widget, visible);
+/*
+ * Return an hbox inside the frame for the option group selected by
+ * @groupmask, creating the frame (with two balanced columns) on first
+ * use. Returns NULL when the option belongs to no group, in which case
+ * the caller packs the widget directly into @box instead.
+ */
+static GtkWidget *gopt_get_group_frame(GtkWidget *box, unsigned int groupmask)
+{
+ unsigned int mask, group;
+ struct opt_group *og;
+ GtkWidget *frame, *hbox;
+ struct gopt_frame_widget *gfw;
+
+ if (!groupmask)
+ return NULL;
+
+ mask = groupmask;
+ og = opt_group_cat_from_mask(&mask);
+ if (!og)
+ return NULL;
+
+ /* Index of the first set group bit; one cached frame per group */
+ group = ffz(~groupmask);
+ gfw = &gopt_g_widgets[group];
+ if (!gfw->vbox[0]) {
+ frame = gtk_frame_new(og->name);
+ gtk_box_pack_start(GTK_BOX(box), frame, FALSE, FALSE, 3);
+ hbox = gtk_hbox_new(FALSE, 0);
+ gtk_container_add(GTK_CONTAINER(frame), hbox);
+ gfw->vbox[0] = gtk_vbox_new(TRUE, 5);
+ gfw->vbox[1] = gtk_vbox_new(TRUE, 5);
+ gtk_box_pack_start(GTK_BOX(hbox), gfw->vbox[0], TRUE, TRUE, 5);
+ gtk_box_pack_start(GTK_BOX(hbox), gfw->vbox[1], TRUE, TRUE, 5);
}
+
+ /* Alternate columns so the frame fills evenly */
+ hbox = gtk_hbox_new(FALSE, 3);
+ gtk_box_pack_start(GTK_BOX(gfw->vbox[gfw->nr++ & 1]), hbox, FALSE, FALSE, 5);
+ return hbox;
}
/*
if (strcmp(parent->name, o->parent))
continue;
- __gopt_set_children_visible(i, visible);
+ if (gopt_widgets[i])
+ gtk_widget_set_sensitive(gopt_widgets[i], visible);
}
}
static void gopt_mark_index(struct gopt *gopt, unsigned int idx)
{
- struct gopt_widget *gw;
-
+ /* Only one top-level widget may be registered per option index */
+ assert(!gopt_widgets[idx]);
gopt->opt_index = idx;
-
- gw = malloc(sizeof(*gw));
- gw->widget = gopt->box;
- flist_add_tail(&gw->list, &gopt_list[idx]);
+ gopt_widgets[idx] = gopt->box;
}
static struct gopt *gopt_new_str_store(struct fio_option *o, const char *text, unsigned int idx)
GtkWidget *label;
s = malloc(sizeof(*s));
+ memset(s, 0, sizeof(*s));
s->gopt.box = gtk_hbox_new(FALSE, 3);
- label = gtk_label_new(o->name);
- gtk_box_pack_start(GTK_BOX(s->gopt.box), label, FALSE, FALSE, 0);
+ if (!o->lname)
+ label = gtk_label_new(o->name);
+ else
+ label = gtk_label_new(o->lname);
s->entry = gtk_entry_new();
gopt_mark_index(&s->gopt, idx);
if (text)
gtk_entry_set_text(GTK_ENTRY(s->entry), text);
gtk_entry_set_editable(GTK_ENTRY(s->entry), 1);
- g_signal_connect(GTK_OBJECT(s->entry), "changed", G_CALLBACK(gopt_str_changed), s);
+ s->gopt.sig_handler = g_signal_connect(GTK_OBJECT(s->entry), "changed", G_CALLBACK(gopt_str_changed), s);
if (o->def)
gtk_entry_set_text(GTK_ENTRY(s->entry), o->def);
gtk_box_pack_start(GTK_BOX(s->gopt.box), s->entry, FALSE, FALSE, 0);
+ gtk_box_pack_start(GTK_BOX(s->gopt.box), label, FALSE, FALSE, 0);
+ o->gui_data = s;
return &s->gopt;
}
GtkWidget *label;
c = malloc(sizeof(*c));
+ memset(c, 0, sizeof(*c));
c->gopt.box = gtk_hbox_new(FALSE, 3);
- label = gtk_label_new(o->name);
- gtk_box_pack_start(GTK_BOX(c->gopt.box), label, FALSE, FALSE, 0);
+ if (!o->lname)
+ label = gtk_label_new(o->name);
+ else
+ label = gtk_label_new(o->lname);
c->combo = gtk_combo_box_new_text();
gopt_mark_index(&c->gopt, idx);
- gtk_box_pack_start(GTK_BOX(c->gopt.box), c->combo, FALSE, FALSE, 0);
+ c->gopt.sig_handler = g_signal_connect(GTK_OBJECT(c->combo), "changed", G_CALLBACK(gopt_combo_changed), c);
- g_signal_connect(GTK_OBJECT(c->combo), "changed", G_CALLBACK(gopt_combo_changed), c);
+ gtk_box_pack_start(GTK_BOX(c->gopt.box), c->combo, FALSE, FALSE, 0);
+ gtk_box_pack_start(GTK_BOX(c->gopt.box), label, FALSE, FALSE, 0);
+ o->gui_data = c;
return c;
}
return &combo->gopt;
}
+/*
+ * Build the widget for a FIO_OPT_STR_MULTI option: one check button
+ * per entry in o->posval[], packed inside a labeled frame.
+ */
+static struct gopt *gopt_new_str_multi(struct fio_option *o, unsigned int idx)
+{
+ struct gopt_str_multi *m;
+ struct value_pair *vp;
+ GtkWidget *frame, *hbox;
+ int i;
+
+ m = malloc(sizeof(*m));
+ memset(m, 0, sizeof(*m));
+ m->gopt.box = gtk_hbox_new(FALSE, 3);
+ gopt_mark_index(&m->gopt, idx);
+
+ /* Prefer the long option name for display, if it is set */
+ if (!o->lname)
+ frame = gtk_frame_new(o->name);
+ else
+ frame = gtk_frame_new(o->lname);
+ gtk_box_pack_start(GTK_BOX(m->gopt.box), frame, FALSE, FALSE, 3);
+
+ hbox = gtk_hbox_new(FALSE, 3);
+ gtk_container_add(GTK_CONTAINER(frame), hbox);
+
+ i = 0;
+ vp = &o->posval[0];
+ while (vp->ival) {
+ m->checks[i] = gtk_check_button_new_with_label(vp->ival);
+ gtk_widget_set_tooltip_text(m->checks[i], vp->help);
+ gtk_box_pack_start(GTK_BOX(hbox), m->checks[i], FALSE, FALSE, 3);
+ i++;
+ vp++;
+ }
+
+ return &m->gopt;
+}
+
static void gopt_int_changed(GtkSpinButton *spin, gpointer data)
{
struct gopt_int *i = (struct gopt_int *) data;
struct fio_option *o = &fio_options[i->gopt.opt_index];
+ GtkAdjustment *adj;
+ int value, delta;
- printf("int %s changed\n", o->name);
+ adj = gtk_spin_button_get_adjustment(spin);
+ value = gtk_adjustment_get_value(adj);
+ /* How far the spinner moved since the last change event */
+ delta = value - i->lastval;
+ i->lastval = value;
+
+ if (o->inv_opt) {
+ struct gopt_int *i_inv = o->inv_opt->gui_data;
+ int cur_val;
+
+ assert(o->type == o->inv_opt->type);
+
+ /*
+ * Apply the opposite delta to the paired inverse option,
+ * with its handler blocked so the update does not recurse
+ * back into this callback.
+ */
+ cur_val = gtk_spin_button_get_value(GTK_SPIN_BUTTON(i_inv->spin));
+ cur_val -= delta;
+ g_signal_handler_block(G_OBJECT(i_inv->spin), i_inv->gopt.sig_handler);
+ gtk_spin_button_set_value(GTK_SPIN_BUTTON(i_inv->spin), cur_val);
+ g_signal_handler_unblock(G_OBJECT(i_inv->spin), i_inv->gopt.sig_handler);
+ }
}
static struct gopt_int *__gopt_new_int(struct fio_option *o, unsigned long long *p,
GtkWidget *label;
i = malloc(sizeof(*i));
+ memset(i, 0, sizeof(*i));
i->gopt.box = gtk_hbox_new(FALSE, 3);
- label = gtk_label_new(o->name);
- gtk_box_pack_start(GTK_BOX(i->gopt.box), label, FALSE, FALSE, 0);
+ if (!o->lname)
+ label = gtk_label_new(o->name);
+ else
+ label = gtk_label_new(o->lname);
maxval = o->maxval;
if (!maxval)
gopt_mark_index(&i->gopt, idx);
gtk_spin_button_set_update_policy(GTK_SPIN_BUTTON(i->spin), GTK_UPDATE_IF_VALID);
gtk_spin_button_set_value(GTK_SPIN_BUTTON(i->spin), defval);
+ i->lastval = defval;
+ i->gopt.sig_handler = g_signal_connect(G_OBJECT(i->spin), "value-changed", G_CALLBACK(gopt_int_changed), i);
gtk_box_pack_start(GTK_BOX(i->gopt.box), i->spin, FALSE, FALSE, 0);
+ gtk_box_pack_start(GTK_BOX(i->gopt.box), label, FALSE, FALSE, 0);
- g_signal_connect(G_OBJECT(i->spin), "value-changed", G_CALLBACK(gopt_int_changed), i);
-
+ o->gui_data = i;
return i;
}
gboolean set;
set = gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(b->check));
+
+ if (o->inv_opt) {
+ struct gopt_bool *b_inv = o->inv_opt->gui_data;
+
+ assert(o->type == o->inv_opt->type);
+
+ g_signal_handler_block(G_OBJECT(b_inv->check), b_inv->gopt.sig_handler);
+ gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(b_inv->check), !set);
+ g_signal_handler_unblock(G_OBJECT(b_inv->check), b_inv->gopt.sig_handler);
+ }
+
gopt_set_children_visible(o, set);
}
int defstate = 0;
b = malloc(sizeof(*b));
+ memset(b, 0, sizeof(*b));
b->gopt.box = gtk_hbox_new(FALSE, 3);
- label = gtk_label_new(o->name);
- gtk_box_pack_start(GTK_BOX(b->gopt.box), label, FALSE, FALSE, 0);
+ if (!o->lname)
+ label = gtk_label_new(o->name);
+ else
+ label = gtk_label_new(o->lname);
b->check = gtk_check_button_new();
gopt_mark_index(&b->gopt, idx);
else if (o->def && !strcmp(o->def, "1"))
defstate = 1;
+ if (o->neg)
+ defstate = !defstate;
+
gtk_toggle_button_set_active(GTK_TOGGLE_BUTTON(b->check), defstate);
- g_signal_connect(G_OBJECT(b->check), "toggled", G_CALLBACK(gopt_bool_toggled), b);
+ b->gopt.sig_handler = g_signal_connect(G_OBJECT(b->check), "toggled", G_CALLBACK(gopt_bool_toggled), b);
gtk_box_pack_start(GTK_BOX(b->gopt.box), b->check, FALSE, FALSE, 0);
+ gtk_box_pack_start(GTK_BOX(b->gopt.box), label, FALSE, FALSE, 0);
+ o->gui_data = b;
return &b->gopt;
}
int i;
r = malloc(sizeof(*r));
+ memset(r, 0, sizeof(*r));
r->gopt.box = gtk_hbox_new(FALSE, 3);
gopt_mark_index(&r->gopt, idx);
- label = gtk_label_new(o->name);
- gtk_box_pack_start(GTK_BOX(r->gopt.box), label, FALSE, FALSE, 0);
+ if (!o->lname)
+ label = gtk_label_new(o->name);
+ else
+ label = gtk_label_new(o->lname);
maxval = o->maxval;
if (!maxval)
g_signal_connect(G_OBJECT(r->spins[i]), "value-changed", G_CALLBACK(range_value_changed), r);
}
+ gtk_box_pack_start(GTK_BOX(r->gopt.box), label, FALSE, FALSE, 0);
+ o->gui_data = r;
return &r->gopt;
}
break;
}
case FIO_OPT_STR: {
- unsigned int *ip = NULL;
+ if (o->posval[0].ival) {
+ unsigned int *ip = NULL;
- if (o->off1)
- ip = td_var(to, o->off1);
+ if (o->off1)
+ ip = td_var(to, o->off1);
+
+ go = gopt_new_combo_int(o, ip, opt_index);
+ } else {
+ /* TODO: usually ->cb, or unsigned int pointer */
+ go = gopt_new_str_store(o, NULL, opt_index);
+ }
- go = gopt_new_combo_int(o, ip, opt_index);
break;
}
case FIO_OPT_STR_STORE: {
break;
}
case FIO_OPT_STR_MULTI:
- go = gopt_new_combo_str(o, NULL, opt_index);
+ go = gopt_new_str_multi(o, opt_index);
break;
case FIO_OPT_RANGE: {
unsigned int *ip[4] = { td_var(to, o->off1),
}
if (go) {
+ GtkWidget *dest;
+
if (o->help)
gtk_widget_set_tooltip_text(go->box, o->help);
-
- gtk_box_pack_start(GTK_BOX(hbox), go->box, FALSE, FALSE, 5);
+
go->opt_type = o->type;
+
+ dest = gopt_get_group_frame(hbox, o->group);
+ if (!dest)
+ gtk_box_pack_start(GTK_BOX(hbox), go->box, FALSE, FALSE, 5);
+ else
+ gtk_box_pack_start(GTK_BOX(dest), go->box, FALSE, FALSE, 5);
}
}
GtkWidget *hbox = NULL;
int i;
+ /*
+ * First add all options
+ */
for (i = 0; fio_options[i].name; i++) {
struct fio_option *o = &fio_options[i];
unsigned int mask = o->category;
struct opt_group *og;
unsigned int i;
- for (i = 0; i < FIO_MAX_OPTS; i++)
- INIT_FLIST_HEAD(&gopt_list[i]);
-
i = 0;
do {
unsigned int mask = (1U << i);
void gopt_get_options_window(GtkWidget *window, struct thread_options *o)
{
GtkWidget *dialog, *notebook;
- GtkWidget *vboxes[__FIO_OPT_G_NR];
+ GtkWidget *vboxes[__FIO_OPT_C_NR];
dialog = gtk_dialog_new_with_buttons("Fio options",
GTK_WINDOW(window), GTK_DIALOG_DESTROY_WITH_PARENT,
gtk_dialog_run(GTK_DIALOG(dialog));
gtk_widget_destroy(dialog);
+ memset(gopt_widgets, 0, sizeof(gopt_widgets));
+ memset(gopt_g_widgets, 0, sizeof(gopt_g_widgets));
}
*/
+/* Top-level option categories (FIO_OPT_C_*); NULL name terminates */
static struct opt_group fio_opt_groups[] = {
{
- .name = "Description",
- .mask = FIO_OPT_G_DESC,
+ .name = "General",
+ .mask = FIO_OPT_C_GENERAL,
+},
+{
+ .name = "I/O",
+ .mask = FIO_OPT_C_IO,
},
{
.name = "File",
- .mask = FIO_OPT_G_FILE,
+ .mask = FIO_OPT_C_FILE,
},
{
- .name = "Misc",
- .mask = FIO_OPT_G_MISC,
+ .name = "Statistics",
+ .mask = FIO_OPT_C_STAT,
},
{
- .name = "IO (main)",
- .mask = FIO_OPT_G_IO,
+ .name = "Logging",
+ .mask = FIO_OPT_C_LOG,
},
{
- .name = "IO direction",
- .mask = FIO_OPT_G_IO_DDIR,
+ .name = NULL,
},
+};
+
+/*
+ * Find the first group in @ogs whose mask bit is set in *@mask, clear
+ * that bit from *@mask, and return the group. Returns NULL when *@mask
+ * is empty, equals the invalid sentinel @inv_mask, or matches nothing.
+ */
+static struct opt_group *__opt_group_from_mask(struct opt_group *ogs, unsigned int *mask,
+ unsigned int inv_mask)
+{
+ struct opt_group *og;
+
+ if (!*mask || *mask == inv_mask)
+ return NULL;
+
+ for (og = ogs; og->name; og++) {
+ if (!(og->mask & *mask))
+ continue;
+ *mask &= ~og->mask;
+ return og;
+ }
+
+ return NULL;
+}
+
+/*
+ * Pull the next matching category (FIO_OPT_C_*) out of *mask,
+ * clearing its bit; NULL when no category bit remains.
+ */
+struct opt_group *opt_group_from_mask(unsigned int *mask)
+{
+ return __opt_group_from_mask(fio_opt_groups, mask, FIO_OPT_C_INVALID);
+}
+
+/* Sub-groups (FIO_OPT_G_*) used to frame related options within a
+ * category; NULL name terminates */
+static struct opt_group fio_opt_cat_groups[] = {
{
- .name = "IO buffer",
- .mask = FIO_OPT_G_IO_BUF,
+ .name = "Rate",
+ .mask = FIO_OPT_G_RATE,
},
{
- .name = "IO engine",
- .mask = FIO_OPT_G_IO_ENG,
+ .name = "Zone",
+ .mask = FIO_OPT_G_ZONE,
},
{
- .name = "Random",
- .mask = FIO_OPT_G_RAND,
+ .name = "Read/write mix",
+ .mask = FIO_OPT_G_RWMIX,
},
{
- .name = "OS",
- .mask = FIO_OPT_G_OS,
+ .name = "Verify",
+ .mask = FIO_OPT_G_VERIFY,
},
{
- .name = "Memory",
- .mask = FIO_OPT_G_MEM,
+ .name = "Trim",
+ .mask = FIO_OPT_G_TRIM,
},
{
- .name = "Verify",
- .mask = FIO_OPT_G_VERIFY,
+ .name = "I/O Logging",
+ .mask = FIO_OPT_G_IOLOG,
},
{
- .name = "CPU",
- .mask = FIO_OPT_G_CPU,
+ .name = "I/O Depth",
+ .mask = FIO_OPT_G_IO_DEPTH,
},
{
- .name = "Log",
- .mask = FIO_OPT_G_LOG,
+ .name = "I/O Flow",
+ .mask = FIO_OPT_G_IO_FLOW,
},
{
- .name = "Zone",
- .mask = FIO_OPT_G_ZONE,
+ .name = "Description",
+ .mask = FIO_OPT_G_DESC,
},
{
- .name = "Cache",
- .mask = FIO_OPT_G_CACHE,
+ .name = "Filename",
+ .mask = FIO_OPT_G_FILENAME,
},
{
- .name = "Stat",
- .mask = FIO_OPT_G_STAT,
+ .name = "General I/O",
+ .mask = FIO_OPT_G_IO_BASIC,
},
{
- .name = "Error",
- .mask = FIO_OPT_G_ERR,
+ .name = "Cgroups",
+ .mask = FIO_OPT_G_CGROUP,
},
{
- .name = "Job",
- .mask = FIO_OPT_G_JOB,
+ .name = "Runtime",
+ .mask = FIO_OPT_G_RUNTIME,
},
{
- .name = NULL,
+ .name = "Process",
+ .mask = FIO_OPT_G_PROCESS,
+},
+{
+ .name = "Job credentials / priority",
+ .mask = FIO_OPT_G_CRED,
+},
+{
+ .name = "Clock settings",
+ .mask = FIO_OPT_G_CLOCK,
+},
+{
+ .name = "I/O Type",
+ .mask = FIO_OPT_G_IO_TYPE,
+},
+{
+ .name = "I/O Thinktime",
+ .mask = FIO_OPT_G_THINKTIME,
+},
+{
+ .name = "Randomizations",
+ .mask = FIO_OPT_G_RANDOM,
+},
+{
+ .name = "I/O buffers",
+ .mask = FIO_OPT_G_IO_BUF,
},
+{
+ .name = NULL,
+}
};
-struct opt_group *opt_group_from_mask(unsigned int *mask)
+/*
+ * Like opt_group_from_mask(), but matches against the sub-group
+ * (FIO_OPT_G_*) table instead of the category table.
+ */
+struct opt_group *opt_group_cat_from_mask(unsigned int *mask)
{
- struct opt_group *og;
- int i;
-
- if (*mask == FIO_OPT_G_INVALID)
- return NULL;
-
- for (i = 0; fio_opt_groups[i].name; i++) {
- og = &fio_opt_groups[i];
-
- if (*mask & og->mask) {
- *mask &= ~(og->mask);
- return og;
- }
- }
-
- return NULL;
+ return __opt_group_from_mask(fio_opt_cat_groups, mask, FIO_OPT_G_INVALID);
}
/*
struct fio_option fio_options[FIO_MAX_OPTS] = {
{
.name = "description",
+ .lname = "Description of job",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(description),
.help = "Text job description",
- .category = FIO_OPT_G_DESC,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_DESC,
},
{
.name = "name",
+ .lname = "Job name",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(name),
.help = "Name of this job",
- .category = FIO_OPT_G_DESC,
- },
- {
- .name = "directory",
- .type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(directory),
- .cb = str_directory_cb,
- .help = "Directory to store files in",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_DESC,
},
{
.name = "filename",
+ .lname = "Filename(s)",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(filename),
.cb = str_filename_cb,
.prio = -1, /* must come after "directory" */
.help = "File(s) to use for the workload",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
},
{
- .name = "kb_base",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(kb_base),
- .verify = kb_base_verify,
- .prio = 1,
- .def = "1024",
- .help = "How many bytes per KB for reporting (1000 or 1024)",
- .category = FIO_OPT_G_MISC,
+ .name = "directory",
+ .lname = "Directory",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(directory),
+ .cb = str_directory_cb,
+ .help = "Directory to store files in",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
},
{
.name = "lockfile",
+ .lname = "Lockfile",
.type = FIO_OPT_STR,
.cb = str_lockfile_cb,
.off1 = td_var_offset(file_lock_mode),
.parent = "filename",
.hide = 0,
.def = "none",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
.posval = {
{ .ival = "none",
.oval = FILE_LOCK_NONE,
},
{
.name = "opendir",
+ .lname = "Open directory",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(opendir),
.cb = str_opendir_cb,
.help = "Recursively add files from this directory and down",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
},
{
.name = "rw",
+ .lname = "Read/write",
.alias = "readwrite",
.type = FIO_OPT_STR,
.cb = str_rw_cb,
.help = "IO direction",
.def = "read",
.verify = rw_verify,
- .category = FIO_OPT_G_IO_DDIR,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "read",
.oval = TD_DDIR_READ,
},
{
.name = "rw_sequencer",
+ .lname = "RW Sequencer",
.type = FIO_OPT_STR,
.off1 = td_var_offset(rw_seq),
.help = "IO offset generator modifier",
.def = "sequential",
- .category = FIO_OPT_G_IO_DDIR,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "sequential",
.oval = RW_SEQ_SEQ,
{
.name = "ioengine",
+ .lname = "IO Engine",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(ioengine),
.help = "IO engine to use",
.def = FIO_PREFERRED_ENGINE,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
.posval = {
{ .ival = "sync",
.help = "Use read/write",
},
{
.name = "iodepth",
+ .lname = "IO Depth",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth),
.help = "Number of IO buffers to keep in flight",
.minval = 1,
.interval = 1,
.def = "1",
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_batch",
+ .lname = "IO Depth batch",
.alias = "iodepth_batch_submit",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_batch),
.minval = 1,
.interval = 1,
.def = "1",
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_batch_complete",
+ .lname = "IO Depth batch complete",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_batch_complete),
.help = "Number of IO buffers to retrieve in one go",
.minval = 0,
.interval = 1,
.def = "1",
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "iodepth_low",
+ .lname = "IO Depth batch low",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_low),
.help = "Low water mark for queuing depth",
.parent = "iodepth",
.hide = 1,
.interval = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
},
{
.name = "size",
+ .lname = "Size",
.type = FIO_OPT_STR_VAL,
.cb = str_size_cb,
.help = "Total size of device or files",
.interval = 1024 * 1024,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fill_device",
+ .lname = "Fill device",
.alias = "fill_fs",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(fill_device),
.help = "Write until an ENOSPC error occurs",
.def = "0",
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "filesize",
+ .lname = "File size",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(file_size_low),
.off2 = td_var_offset(file_size_high),
.minval = 1,
.help = "Size of individual files",
.interval = 1024 * 1024,
- .category = FIO_OPT_G_IO | FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "offset",
+ .lname = "IO offset",
.alias = "fileoffset",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(start_offset),
.help = "Start IO from this offset",
.def = "0",
.interval = 1024 * 1024,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "offset_increment",
+ .lname = "IO offset increment",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(offset_increment),
.help = "What is the increment from one offset to the next",
.hide = 1,
.def = "0",
.interval = 1024 * 1024,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bs",
+ .lname = "Block size",
.alias = "blocksize",
.type = FIO_OPT_INT,
.off1 = td_var_offset(bs[DDIR_READ]),
.parent = "rw",
.hide = 1,
.interval = 512,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "ba",
+ .lname = "Block size align",
.alias = "blockalign",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ba[DDIR_READ]),
.parent = "rw",
.hide = 1,
.interval = 512,
- .category = FIO_OPT_G_IO | FIO_OPT_G_IO_BUF,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bsrange",
+ .lname = "Block size range",
.alias = "blocksize_range",
.type = FIO_OPT_RANGE,
.off1 = td_var_offset(min_bs[DDIR_READ]),
.parent = "rw",
.hide = 1,
.interval = 4096,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bssplit",
+ .lname = "Block size split",
.type = FIO_OPT_STR,
.cb = str_bssplit_cb,
.help = "Set a specific mix of block sizes",
.parent = "rw",
.hide = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "bs_unaligned",
+ .lname = "Block size unaligned",
.alias = "blocksize_unaligned",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(bs_unaligned),
.help = "Don't sector align IO buffer sizes",
.parent = "rw",
.hide = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "randrepeat",
+ .lname = "Random repeatable",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(rand_repeatable),
.help = "Use repeatable random IO pattern",
.def = "1",
.parent = "rw",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_RAND,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "use_os_rand",
+ .lname = "Use OS random",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(use_os_rand),
.help = "Set to use OS random generator",
.def = "0",
.parent = "rw",
.hide = 1,
- .category = FIO_OPT_G_RAND,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "norandommap",
+ .lname = "No randommap",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(norandommap),
.help = "Accept potential duplicate random blocks",
.parent = "rw",
.hide = 1,
- .category = FIO_OPT_G_RAND,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "softrandommap",
+ .lname = "Soft randommap",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(softrandommap),
.help = "Set norandommap if randommap allocation fails",
.parent = "norandommap",
.hide = 1,
.def = "0",
- .category = FIO_OPT_G_RAND,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RANDOM,
},
{
.name = "nrfiles",
+ .lname = "Number of files",
.alias = "nr_files",
.type = FIO_OPT_INT,
.off1 = td_var_offset(nr_files),
.help = "Split job workload between this number of files",
.def = "1",
.interval = 1,
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "openfiles",
+ .lname = "Number of open files",
.type = FIO_OPT_INT,
.off1 = td_var_offset(open_files),
.help = "Number of files to keep open at the same time",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "file_service_type",
+ .lname = "File service type",
.type = FIO_OPT_STR,
.cb = str_fst_cb,
.off1 = td_var_offset(file_service_type),
.help = "How to select which file to service next",
.def = "roundrobin",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "random",
.oval = FIO_FSERVICE_RANDOM,
#ifdef FIO_HAVE_FALLOCATE
{
.name = "fallocate",
+ .lname = "Fallocate",
.type = FIO_OPT_STR,
.off1 = td_var_offset(fallocate_mode),
.help = "Whether pre-allocation is performed when laying out files",
.def = "posix",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "none",
.oval = FIO_FALLOCATE_NONE,
#endif /* FIO_HAVE_FALLOCATE */
{
.name = "fadvise_hint",
+ .lname = "Fadvise hint",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(fadvise_hint),
.help = "Use fadvise() to advise the kernel on IO pattern",
.def = "1",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fsync",
+ .lname = "Fsync",
.type = FIO_OPT_INT,
.off1 = td_var_offset(fsync_blocks),
.help = "Issue fsync for writes every given number of blocks",
.def = "0",
.interval = 1,
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fdatasync",
+ .lname = "Fdatasync",
.type = FIO_OPT_INT,
.off1 = td_var_offset(fdatasync_blocks),
.help = "Issue fdatasync for writes every given number of blocks",
.def = "0",
.interval = 1,
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "write_barrier",
+ .lname = "Write barrier",
.type = FIO_OPT_INT,
.off1 = td_var_offset(barrier_blocks),
.help = "Make every Nth write a barrier write",
.def = "0",
.interval = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_SYNC_FILE_RANGE
{
.name = "sync_file_range",
+ .lname = "Sync file range",
.posval = {
{ .ival = "wait_before",
.oval = SYNC_FILE_RANGE_WAIT_BEFORE,
.cb = str_sfr_cb,
.off1 = td_var_offset(sync_file_range),
.help = "Use sync_file_range()",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
#endif
{
.name = "direct",
+ .lname = "Direct I/O",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(odirect),
.help = "Use O_DIRECT IO (negates buffered)",
.def = "0",
- .category = FIO_OPT_G_IO,
+ .inverse = "buffered",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "buffered",
+ .lname = "Buffered I/O",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(odirect),
.neg = 1,
.help = "Use buffered IO (negates direct)",
.def = "1",
- .category = FIO_OPT_G_IO,
+ .inverse = "direct",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "overwrite",
+ .lname = "Overwrite",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(overwrite),
.help = "When writing, set whether to overwrite current data",
.def = "0",
- .category = FIO_OPT_G_IO | FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "loops",
+ .lname = "Loops",
.type = FIO_OPT_INT,
.off1 = td_var_offset(loops),
.help = "Number of times to run the job",
.def = "1",
.interval = 1,
- .category = FIO_OPT_G_MISC,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "numjobs",
+ .lname = "Number of jobs",
.type = FIO_OPT_INT,
.off1 = td_var_offset(numjobs),
.help = "Duplicate this job this many times",
.def = "1",
.interval = 1,
- .category = FIO_OPT_G_MISC,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "startdelay",
+ .lname = "Start delay",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(start_delay),
.help = "Only start job when this period has passed",
.def = "0",
- .category = FIO_OPT_G_MISC,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "runtime",
+ .lname = "Runtime",
.alias = "timeout",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(timeout),
.help = "Stop workload when this amount of time has passed",
.def = "0",
- .category = FIO_OPT_G_MISC,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "time_based",
+ .lname = "Time based",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(time_based),
.help = "Keep running until runtime/timeout is met",
- .category = FIO_OPT_G_MISC,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "ramp_time",
+ .lname = "Ramp time",
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(ramp_time),
.help = "Ramp up time before measuring performance",
- .category = FIO_OPT_G_MISC,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
},
{
.name = "clocksource",
+ .lname = "Clock source",
.type = FIO_OPT_STR,
.cb = fio_clock_source_cb,
.off1 = td_var_offset(clocksource),
.help = "What type of timing source to use",
- .category = FIO_OPT_G_OS,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CLOCK,
.posval = {
{ .ival = "gettimeofday",
.oval = CS_GTOD,
{
.name = "mem",
.alias = "iomem",
+ .lname = "I/O Memory",
.type = FIO_OPT_STR,
.cb = str_mem_cb,
.off1 = td_var_offset(mem_type),
.help = "Backing type for IO buffers",
.def = "malloc",
- .category = FIO_OPT_G_IO_BUF | FIO_OPT_G_MEM,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "malloc",
.oval = MEM_MALLOC,
{
.name = "iomem_align",
.alias = "mem_align",
+ .lname = "I/O memory alignment",
.type = FIO_OPT_INT,
.off1 = td_var_offset(mem_align),
.minval = 0,
.def = "0",
.parent = "iomem",
.hide = 1,
- .category = FIO_OPT_G_IO_BUF | FIO_OPT_G_MEM,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "verify",
+ .lname = "Verify",
.type = FIO_OPT_STR,
.off1 = td_var_offset(verify),
.help = "Verify data written",
.cb = str_verify_cb,
.def = "0",
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
.posval = {
{ .ival = "0",
.oval = VERIFY_NONE,
},
{
.name = "do_verify",
+ .lname = "Perform verify step",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(do_verify),
.help = "Run verification stage after write",
.def = "1",
.parent = "verify",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verifysort",
+ .lname = "Verify sort",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(verifysort),
.help = "Sort written verify blocks for read back",
.def = "1",
.parent = "verify",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_interval",
+ .lname = "Verify interval",
.type = FIO_OPT_INT,
.off1 = td_var_offset(verify_interval),
.minval = 2 * sizeof(struct verify_header),
.parent = "verify",
.hide = 1,
.interval = 2 * sizeof(struct verify_header),
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_offset",
+ .lname = "Verify offset",
.type = FIO_OPT_INT,
.help = "Offset verify header location by N bytes",
.def = "0",
.cb = str_verify_offset_cb,
.parent = "verify",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_pattern",
+ .lname = "Verify pattern",
.type = FIO_OPT_STR,
.cb = str_verify_pattern_cb,
.help = "Fill pattern for IO buffers",
.parent = "verify",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_fatal",
+ .lname = "Verify fatal",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(verify_fatal),
.def = "0",
.help = "Exit on a single verify failure, don't continue",
.parent = "verify",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY | FIO_OPT_G_ERR,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_dump",
+ .lname = "Verify dump",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(verify_dump),
.def = "0",
.help = "Dump contents of good and bad blocks on failure",
.parent = "verify",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY | FIO_OPT_G_ERR,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_async",
+ .lname = "Verify asynchronously",
.type = FIO_OPT_INT,
.off1 = td_var_offset(verify_async),
.def = "0",
.help = "Number of async verifier threads to use",
.parent = "verify",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_backlog",
+ .lname = "Verify backlog",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(verify_backlog),
.help = "Verify after this number of blocks are written",
.parent = "verify",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
{
.name = "verify_backlog_batch",
+ .lname = "Verify backlog batch",
.type = FIO_OPT_INT,
.off1 = td_var_offset(verify_batch),
.help = "Verify this number of IO blocks",
.parent = "verify",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_VERIFY,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.name = "verify_async_cpus",
+ .lname = "Async verify CPUs",
.type = FIO_OPT_STR,
.cb = str_verify_cpus_allowed_cb,
.help = "Set CPUs allowed for async verify threads",
.parent = "verify_async",
.hide = 1,
- .category = FIO_OPT_G_OS | FIO_OPT_G_CPU | FIO_OPT_G_VERIFY,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
},
#endif
#ifdef FIO_HAVE_TRIM
{
.name = "trim_percentage",
+ .lname = "Trim percentage",
.type = FIO_OPT_INT,
.cb = str_verify_trim_cb,
.minval = 0,
.def = "0",
.interval = 1,
.hide = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
{
.name = "trim_verify_zero",
+ .lname = "Verify trim zero",
.type = FIO_OPT_BOOL,
.help = "Verify that trim/discarded blocks are returned as zeroes",
.off1 = td_var_offset(trim_zero),
.parent = "trim_percentage",
.hide = 1,
.def = "1",
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
{
.name = "trim_backlog",
+ .lname = "Trim backlog",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(trim_backlog),
.help = "Trim after this number of blocks are written",
.parent = "trim_percentage",
.hide = 1,
.interval = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
{
.name = "trim_backlog_batch",
+ .lname = "Trim backlog batch",
.type = FIO_OPT_INT,
.off1 = td_var_offset(trim_batch),
.help = "Trim this number of IO blocks",
.parent = "trim_percentage",
.hide = 1,
.interval = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_TRIM,
},
#endif
{
.name = "write_iolog",
+ .lname = "Write I/O log",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(write_iolog_file),
.help = "Store IO pattern to file",
- .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "read_iolog",
+ .lname = "Read I/O log",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(read_iolog_file),
.help = "Playback IO pattern from file",
- .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "replay_no_stall",
+ .lname = "Don't stall on replay",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(no_stall),
.def = "0",
.parent = "read_iolog",
.hide = 1,
.help = "Playback IO pattern file as fast as possible without stalls",
- .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "replay_redirect",
+ .lname = "Redirect device for replay",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(replay_redirect),
.parent = "read_iolog",
.hide = 1,
.help = "Replay all I/O onto this device, regardless of trace device",
- .category = FIO_OPT_G_IO | FIO_OPT_G_LOG,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
},
{
.name = "exec_prerun",
+ .lname = "Pre-execute runnable",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_prerun),
.help = "Execute this file prior to running job",
- .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "exec_postrun",
+ .lname = "Post-execute runnable",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(exec_postrun),
.help = "Execute this file after running job",
- .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_IOSCHED_SWITCH
{
.name = "ioscheduler",
+ .lname = "I/O scheduler",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(ioscheduler),
.help = "Use this IO scheduler on the backing device",
- .category = FIO_OPT_G_OS | FIO_OPT_G_IO,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
#endif
{
.name = "zonesize",
+ .lname = "Zone size",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_size),
.help = "Amount of data to read per zone",
.def = "0",
.interval = 1024 * 1024,
- .category = FIO_OPT_G_IO | FIO_OPT_G_ZONE,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
},
{
.name = "zonerange",
+ .lname = "Zone range",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_range),
.help = "Give size of an IO zone",
.def = "0",
.interval = 1024 * 1024,
- .category = FIO_OPT_G_IO | FIO_OPT_G_ZONE,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
},
{
.name = "zoneskip",
+ .lname = "Zone skip",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(zone_skip),
.help = "Space between IO zones",
.def = "0",
.interval = 1024 * 1024,
- .category = FIO_OPT_G_IO | FIO_OPT_G_ZONE,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
},
{
.name = "lockmem",
+ .lname = "Lock memory",
.type = FIO_OPT_STR_VAL,
.cb = str_lockmem_cb,
.help = "Lock down this amount of memory",
.def = "0",
.interval = 1024 * 1024,
- .category = FIO_OPT_G_OS | FIO_OPT_G_MEM,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "rwmixread",
+ .lname = "Read/write mix read",
.type = FIO_OPT_INT,
.cb = str_rwmix_read_cb,
.maxval = 100,
.help = "Percentage of mixed workload that is reads",
.def = "50",
.interval = 5,
- .category = FIO_OPT_G_IO,
+ .inverse = "rwmixwrite",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RWMIX,
},
{
.name = "rwmixwrite",
+ .lname = "Read/write mix write",
.type = FIO_OPT_INT,
.cb = str_rwmix_write_cb,
.maxval = 100,
.help = "Percentage of mixed workload that is writes",
.def = "50",
.interval = 5,
- .category = FIO_OPT_G_IO,
+ .inverse = "rwmixread",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RWMIX,
},
{
.name = "rwmixcycle",
+ .lname = "Read/write mix cycle",
.type = FIO_OPT_DEPRECATED,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RWMIX,
},
{
.name = "nice",
+ .lname = "Nice",
.type = FIO_OPT_INT,
.off1 = td_var_offset(nice),
.help = "Set job CPU nice value",
.maxval = 20,
.def = "0",
.interval = 1,
- .category = FIO_OPT_G_OS | FIO_OPT_G_CPU,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
#ifdef FIO_HAVE_IOPRIO
{
.name = "prio",
+ .lname = "I/O nice priority",
.type = FIO_OPT_INT,
.cb = str_prio_cb,
.help = "Set job IO priority value",
.minval = 0,
.maxval = 7,
.interval = 1,
- .category = FIO_OPT_G_OS | FIO_OPT_G_CPU,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
{
.name = "prioclass",
+ .lname = "I/O nice priority class",
.type = FIO_OPT_INT,
.cb = str_prioclass_cb,
.help = "Set job IO priority class",
.minval = 0,
.maxval = 3,
.interval = 1,
- .category = FIO_OPT_G_OS | FIO_OPT_G_CPU,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
#endif
{
.name = "thinktime",
+ .lname = "Thinktime",
.type = FIO_OPT_INT,
.off1 = td_var_offset(thinktime),
.help = "Idle time between IO buffers (usec)",
.def = "0",
- .category = FIO_OPT_G_MISC,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_THINKTIME,
},
{
.name = "thinktime_spin",
+ .lname = "Thinktime spin",
.type = FIO_OPT_INT,
.off1 = td_var_offset(thinktime_spin),
.help = "Start think time by spinning this amount (usec)",
.def = "0",
.parent = "thinktime",
.hide = 1,
- .category = FIO_OPT_G_MISC,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_THINKTIME,
},
{
.name = "thinktime_blocks",
+ .lname = "Thinktime blocks",
.type = FIO_OPT_INT,
.off1 = td_var_offset(thinktime_blocks),
.help = "IO buffer period between 'thinktime'",
.def = "1",
.parent = "thinktime",
.hide = 1,
- .category = FIO_OPT_G_MISC,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_THINKTIME,
},
{
.name = "rate",
+ .lname = "I/O rate",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rate[0]),
.off2 = td_var_offset(rate[1]),
.help = "Set bandwidth rate",
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "ratemin",
+ .lname = "I/O min rate",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ratemin[0]),
.off2 = td_var_offset(ratemin[1]),
.help = "Job must meet this rate or it will be shutdown",
.parent = "rate",
.hide = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "rate_iops",
+ .lname = "I/O rate IOPS",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rate_iops[0]),
.off2 = td_var_offset(rate_iops[1]),
.help = "Limit IO used to this number of IO operations/sec",
.hide = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "rate_iops_min",
+ .lname = "I/O min rate IOPS",
.type = FIO_OPT_INT,
.off1 = td_var_offset(rate_iops_min[0]),
.off2 = td_var_offset(rate_iops_min[1]),
.help = "Job must meet this rate or it will be shut down",
.parent = "rate_iops",
.hide = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "ratecycle",
+ .lname = "I/O rate cycle",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ratecycle),
.help = "Window average for rate limits (msec)",
.def = "1000",
.parent = "rate",
.hide = 1,
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
},
{
.name = "invalidate",
+ .lname = "Cache invalidate",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(invalidate_cache),
.help = "Invalidate buffer/page cache prior to running job",
.def = "1",
- .category = FIO_OPT_G_IO | FIO_OPT_G_CACHE,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "sync",
+ .lname = "Synchronous I/O",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(sync_io),
.help = "Use O_SYNC for buffered writes",
.def = "0",
.parent = "buffered",
.hide = 1,
- .category = FIO_OPT_G_IO | FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_TYPE,
},
{
.name = "bwavgtime",
+ .lname = "Bandwidth average time",
.type = FIO_OPT_INT,
.off1 = td_var_offset(bw_avg_time),
.help = "Time window over which to calculate bandwidth"
.parent = "write_bw_log",
.hide = 1,
.interval = 100,
- .category = FIO_OPT_G_LOG | FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "iopsavgtime",
+ .lname = "IOPS average time",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iops_avg_time),
.help = "Time window over which to calculate IOPS (msec)",
.parent = "write_iops_log",
.hide = 1,
.interval = 100,
- .category = FIO_OPT_G_LOG | FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "create_serialize",
+ .lname = "Create serialize",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_serialize),
.help = "Serialize creating of job files",
.def = "1",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "create_fsync",
+ .lname = "Create fsync",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_fsync),
.help = "fsync file after creation",
.def = "1",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "create_on_open",
+ .lname = "Create on open",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_on_open),
.help = "Create files when they are opened for IO",
.def = "0",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "pre_read",
+ .lname = "Pre-read files",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(pre_read),
.help = "Pre-read files before starting official testing",
.def = "0",
- .category = FIO_OPT_G_FILE | FIO_OPT_G_CACHE,
- },
- {
- .name = "cpuload",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(cpuload),
- .help = "Use this percentage of CPU",
- .category = FIO_OPT_G_CPU,
- },
- {
- .name = "cpuchunks",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(cpucycle),
- .help = "Length of the CPU burn cycles (usecs)",
- .def = "50000",
- .parent = "cpuload",
- .hide = 1,
- .category = FIO_OPT_G_CPU,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
.name = "cpumask",
+ .lname = "CPU mask",
.type = FIO_OPT_INT,
.cb = str_cpumask_cb,
.help = "CPU affinity mask",
- .category = FIO_OPT_G_CPU | FIO_OPT_G_OS,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
{
.name = "cpus_allowed",
+ .lname = "CPUs allowed",
.type = FIO_OPT_STR,
.cb = str_cpus_allowed_cb,
.help = "Set CPUs allowed",
- .category = FIO_OPT_G_CPU | FIO_OPT_G_OS,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
#endif
{
.name = "end_fsync",
+ .lname = "End fsync",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(end_fsync),
.help = "Include fsync at the end of job",
.def = "0",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "fsync_on_close",
+ .lname = "Fsync on close",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(fsync_on_close),
.help = "fsync files on close",
.def = "0",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "unlink",
+ .lname = "Unlink file",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(unlink),
.help = "Unlink created files after job has completed",
.def = "0",
- .category = FIO_OPT_G_FILE,
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "exitall",
+ .lname = "Exit-all on terminate",
.type = FIO_OPT_STR_SET,
.cb = str_exitall_cb,
.help = "Terminate all jobs when one exits",
- .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "stonewall",
+ .lname = "Wait for previous",
.alias = "wait_for_previous",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(stonewall),
.help = "Insert a hard barrier between this job and previous",
- .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "new_group",
+ .lname = "New group",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(new_group),
.help = "Mark the start of a new group (for reporting)",
- .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "thread",
+ .lname = "Thread",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(use_thread),
.help = "Use threads instead of processes",
- .category = FIO_OPT_G_MISC | FIO_OPT_G_OS | FIO_OPT_G_JOB,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
},
{
.name = "write_bw_log",
+ .lname = "Write bandwidth log",
.type = FIO_OPT_STR,
.off1 = td_var_offset(write_bw_log),
.cb = str_write_bw_log_cb,
.help = "Write log of bandwidth during run",
- .category = FIO_OPT_G_LOG,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "write_lat_log",
+ .lname = "Write latency log",
.type = FIO_OPT_STR,
.off1 = td_var_offset(write_lat_log),
.cb = str_write_lat_log_cb,
.help = "Write log of latency during run",
- .category = FIO_OPT_G_LOG,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "write_iops_log",
+ .lname = "Write IOPS log",
.type = FIO_OPT_STR,
.off1 = td_var_offset(write_iops_log),
.cb = str_write_iops_log_cb,
.help = "Write log of IOPS during run",
- .category = FIO_OPT_G_LOG,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "log_avg_msec",
+ .lname = "Log averaging (msec)",
.type = FIO_OPT_INT,
.off1 = td_var_offset(log_avg_msec),
.help = "Average bw/iops/lat logs over this period of time",
.def = "0",
- .category = FIO_OPT_G_LOG,
- },
- {
- .name = "hugepage-size",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(hugepage_size),
- .help = "When using hugepages, specify size of each page",
- .def = __fio_stringify(FIO_HUGE_PAGE),
- .interval = 1024 * 1024,
- .category = FIO_OPT_G_OS | FIO_OPT_G_MEM,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "group_reporting",
- .type = FIO_OPT_STR_SET,
+ .lname = "Group reporting",
+ .type = FIO_OPT_BOOL,
.off1 = td_var_offset(group_reporting),
.help = "Do reporting on a per-group basis",
- .category = FIO_OPT_G_MISC,
+ .def = "1",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "zero_buffers",
+ .lname = "Zero I/O buffers",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(zero_buffers),
.help = "Init IO buffers to all zeroes",
- .category = FIO_OPT_G_IO_BUF,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "refill_buffers",
+ .lname = "Refill I/O buffers",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(refill_buffers),
.help = "Refill IO buffers on every IO submit",
- .category = FIO_OPT_G_IO_BUF,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "scramble_buffers",
+ .lname = "Scramble I/O buffers",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(scramble_buffers),
.help = "Slightly scramble buffers on every IO submit",
.def = "1",
- .category = FIO_OPT_G_IO_BUF,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "buffer_compress_percentage",
+ .lname = "Buffer compression percentage",
.type = FIO_OPT_INT,
.off1 = td_var_offset(compress_percentage),
.maxval = 100,
.minval = 1,
.help = "How compressible the buffer is (approximately)",
.interval = 5,
- .category = FIO_OPT_G_IO_BUF,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "buffer_compress_chunk",
+ .lname = "Buffer compression chunk size",
.type = FIO_OPT_INT,
.off1 = td_var_offset(compress_chunk),
.parent = "buffer_compress_percentage",
.hide = 1,
.help = "Size of compressible region in buffer",
.interval = 256,
- .category = FIO_OPT_G_IO_BUF,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
},
{
.name = "clat_percentiles",
+ .lname = "Completion latency percentiles",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(clat_percentiles),
.help = "Enable the reporting of completion latency percentiles",
.def = "1",
- .category = FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "percentile_list",
+ .lname = "Completion latency percentile list",
.type = FIO_OPT_FLOAT_LIST,
.off1 = td_var_offset(percentile_list),
.off2 = td_var_offset(overwrite_plist),
.maxlen = FIO_IO_U_LIST_MAX_LEN,
.minfp = 0.0,
.maxfp = 100.0,
- .category = FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
#ifdef FIO_HAVE_DISK_UTIL
{
.name = "disk_util",
+ .lname = "Disk utilization",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(do_disk_util),
.help = "Log disk utilization statistics",
.def = "1",
- .category = FIO_OPT_G_OS | FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
#endif
{
.name = "gtod_reduce",
+ .lname = "Reduce gettimeofday() calls",
.type = FIO_OPT_BOOL,
.help = "Greatly reduce number of gettimeofday() calls",
.cb = str_gtod_reduce_cb,
.def = "0",
- .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_lat",
+ .lname = "Disable all latency stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_lat),
.help = "Disable latency numbers",
.parent = "gtod_reduce",
.hide = 1,
.def = "0",
- .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_clat",
+ .lname = "Disable completion latency stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_clat),
.help = "Disable completion latency numbers",
.parent = "gtod_reduce",
.hide = 1,
.def = "0",
- .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_slat",
+ .lname = "Disable submission latency stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_slat),
.help = "Disable submission latency numbers",
.parent = "gtod_reduce",
.hide = 1,
.def = "0",
- .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "disable_bw_measurement",
+ .lname = "Disable bandwidth stats",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_bw),
.help = "Disable bandwidth logging",
.parent = "gtod_reduce",
.hide = 1,
.def = "0",
- .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "gtod_cpu",
+ .lname = "Dedicated gettimeofday() CPU",
.type = FIO_OPT_INT,
.cb = str_gtod_cpu_cb,
.help = "Set up dedicated gettimeofday() thread on this CPU",
.verify = gtod_cpu_verify,
- .category = FIO_OPT_G_OS | FIO_OPT_G_MISC | FIO_OPT_G_STAT,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CLOCK,
},
{
.name = "continue_on_error",
+ .lname = "Continue on error",
.type = FIO_OPT_STR,
.off1 = td_var_offset(continue_on_error),
.help = "Continue on non-fatal errors during IO",
.def = "none",
- .category = FIO_OPT_G_MISC | FIO_OPT_G_ERR,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
.posval = {
{ .ival = "none",
.oval = ERROR_TYPE_NONE,
},
{
.name = "profile",
+ .lname = "Profile",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(profile),
.help = "Select a specific builtin performance test",
- .category = FIO_OPT_G_MISC | FIO_OPT_G_JOB,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "cgroup",
+ .lname = "Cgroup",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(cgroup),
.help = "Add job to cgroup of this name",
- .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CGROUP,
+ },
+ {
+ .name = "cgroup_nodelete",
+ .lname = "Cgroup no-delete",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(cgroup_nodelete),
+ .help = "Do not delete cgroups after job completion",
+ .def = "0",
+ .parent = "cgroup",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CGROUP,
},
{
.name = "cgroup_weight",
+ .lname = "Cgroup weight",
.type = FIO_OPT_INT,
.off1 = td_var_offset(cgroup_weight),
.help = "Use given weight for cgroup",
.minval = 100,
.maxval = 1000,
- .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
- },
- {
- .name = "cgroup_nodelete",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(cgroup_nodelete),
- .help = "Do not delete cgroups after job completion",
- .def = "0",
- .category = FIO_OPT_G_MISC | FIO_OPT_G_OS,
+ .parent = "cgroup",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CGROUP,
},
{
.name = "uid",
+ .lname = "User ID",
.type = FIO_OPT_INT,
.off1 = td_var_offset(uid),
.help = "Run job with this user ID",
- .category = FIO_OPT_G_OS | FIO_OPT_G_JOB,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
},
{
.name = "gid",
+ .lname = "Group ID",
.type = FIO_OPT_INT,
.off1 = td_var_offset(gid),
.help = "Run job with this group ID",
- .category = FIO_OPT_G_OS | FIO_OPT_G_JOB,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
+ },
+ {
+ .name = "kb_base",
+ .lname = "KB Base",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(kb_base),
+ .verify = kb_base_verify,
+ .prio = 1,
+ .def = "1024",
+ .help = "How many bytes per KB for reporting (1000 or 1024)",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "hugepage-size",
+ .lname = "Hugepage size",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(hugepage_size),
+ .help = "When using hugepages, specify size of each page",
+ .def = __fio_stringify(FIO_HUGE_PAGE),
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
},
{
.name = "flow_id",
+ .lname = "I/O flow ID",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow_id),
.help = "The flow index ID to use",
.def = "0",
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = "flow",
+ .lname = "I/O flow weight",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow),
.help = "Weight for flow control of this job",
.parent = "flow_id",
.hide = 1,
.def = "0",
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = "flow_watermark",
+ .lname = "I/O flow watermark",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow_watermark),
.help = "High watermark for flow control. This option"
.parent = "flow_id",
.hide = 1,
.def = "1024",
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = "flow_sleep",
+ .lname = "I/O flow sleep",
.type = FIO_OPT_INT,
.off1 = td_var_offset(flow_sleep),
.help = "How many microseconds to sleep after being held"
.parent = "flow_id",
.hide = 1,
.def = "0",
- .category = FIO_OPT_G_IO,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_FLOW,
},
{
.name = NULL,