unsigned long long *elapsed_us,
const enum fio_ddir ddir)
{
/*
 * Hunk adds an early-out: when this thread only verifies previously
 * written data (verify_only), skip runtime accounting for the WRITE
 * direction — no actual writes are being timed in that mode.
 */
+ if (ddir == DDIR_WRITE && td_write(td) && td->o.verify_only)
+ return;
+
/*
 * Fold the newly elapsed wall time into the per-direction runtime:
 * remove the previously-accounted contribution, extend elapsed_us
 * with time since td->start, then re-add. "(x + 999) / 1000" rounds
 * microseconds up to milliseconds.
 */
td->ts.runtime[ddir] -= (elapsed_us[ddir] + 999) / 1000;
elapsed_us[ddir] += utime_since_now(&td->start);
td->ts.runtime[ddir] += (elapsed_us[ddir] + 999) / 1000;
/*
 * Read back and check that the selected scheduler is now the default.
 */
/*
 * memset guarantees tmp is NUL-terminated even when fread() fills
 * fewer bytes than sizeof(tmp) — fread does not terminate the buffer.
 */
+ memset(tmp, 0, sizeof(tmp));
ret = fread(tmp, sizeof(tmp), 1, f);
/*
 * NOTE(review): fread() returns a size_t item count and is never
 * negative, so "ret < 0" can only fire via signed conversion of a huge
 * value; ferror(f) is the meaningful failure check here.
 */
if (ferror(f) || ret < 0) {
td_verror(td, errno, "fread");
fclose(f);
return 1;
}
- tmp[sizeof(tmp) - 1] = '\0';
+ /*
+ * either a list of io schedulers or "none\n" is expected.
+ */
/*
 * NOTE(review): strips the trailing '\n'. If the file read back empty
 * (strlen(tmp) == 0), this indexes tmp[-1] — out-of-bounds write.
 * Presumably sysfs always returns at least "none\n"; worth a guard.
 */
+ tmp[strlen(tmp) - 1] = '\0';
/* Scheduler in use appears bracketed, e.g. "[cfq]", in the sysfs list. */
sprintf(tmp2, "[%s]", td->o.ioscheduler);
/*
 * Run an external command built from `string`, the job name and `mode`.
 * (Body continues beyond this hunk.)
 */
static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
/*
 * Hunk changes newlen from int to size_t: strlen() returns size_t, and
 * the sum is passed to malloc() — avoids signed truncation/overflow on
 * very long strings. "+ 9 + 1" presumably covers the fixed wrapper
 * characters of the command line plus the NUL terminator — TODO confirm
 * against the sprintf/snprintf format used below this hunk.
 */
- int ret, newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+ size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+ int ret;
char *str;
/* NOTE(review): malloc result is not checked in the visible span. */
str = malloc(newlen);
/*
 * Load previously saved verify state: from the server when running as a
 * network backend, otherwise from a local file. This hunk threads a new
 * on-disk/protocol version number (`ver`) from the fetch call into the
 * conversion step, so verify_convert_assign_state() can interpret the
 * blob according to the version it was written with.
 */
if (is_backend) {
void *data;
+ int ver;
/*
 * Fetch this thread's stored verify state; `data` is presumably
 * heap-allocated by the callee on success (ret == 0) — ownership
 * handling is outside the visible span, verify against caller.
 */
ret = fio_server_get_verify_state(td->o.name,
- td->thread_number - 1, &data);
+ td->thread_number - 1, &data, &ver);
if (!ret)
- verify_convert_assign_state(td, data);
+ verify_convert_assign_state(td, data, ver);
} else
ret = verify_load_state(td, "local");