cpumask=x Allow job to run on CPUs defined by mask
fsync=x If writing, fsync after every x blocks have been written
startdelay=x Start this thread x seconds after startup
+ timeout=x Terminate x seconds after startup
aio Use Linux async io
aio_depth=x Allow x iocbs in flight
return 0;
}
+/*
+ * Return 1 if this job has been running for at least its configured
+ * timeout (seconds), 0 otherwise. A timeout of 0 means "run forever":
+ * without the explicit guard below, mtime_since() >= 0 would always be
+ * true and the job would terminate after its very first block.
+ */
+static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
+{
+	if (!td->timeout)
+		return 0;
+	if (mtime_since(&td->start, t) >= td->timeout * 1000)
+		return 1;
+
+	return 0;
+}
+
#define should_fsync(td) ((td)->ddir == DDIR_WRITE && !(td)->odirect)
static void do_sync_io(struct thread_data *td)
td->min_latency = msec;
if (msec > td->max_latency)
td->max_latency = msec;
+
+ if (runtime_exceeded(td, &e))
+ break;
}
if (should_fsync(td))
td->error = ENODATA;
break;
}
+
+ if (runtime_exceeded(td, &e))
+ break;
}
}
td->odirect = def_thread.odirect;
td->ratecycle = def_thread.ratecycle;
td->sequential = def_thread.sequential;
+ td->timeout = def_thread.timeout;
memcpy(&td->cpumask, &def_thread.cpumask, sizeof(td->cpumask));
return td;
td->start_delay = strtoul(string, NULL, 10);
}
+ c = strstr(p, "timeout=");
+ if (c) {
+ c += 8;
+ fill_option(c, string);
+ td->timeout = strtoul(string, NULL, 10);
+ }
+
c = strstr(p, "aio_depth=");
if (c) {
c += 10;
fgetpos(f, &off);
continue;
}
+ if (!check_int(p, "timeout", &td->timeout)) {
+ fgetpos(f, &off);
+ continue;
+ }
if (!check_int(p, "aio_depth", &td->aio_depth)) {
fgetpos(f, &off);
continue;
signal(SIGINT, sig_handler);
- if (def_thread.timeout) {
- signal(SIGALRM, sig_handler);
- alarm(def_thread.timeout);
- }
-
todo = thread_number;
nr_running = 0;
m_rate = t_rate = 0;