The CPU IO engine is most often used to saturate the system
while running an IO load on it. As such, it's useful to have
CPU engine threads exit automatically when IO has completed.
Add exit_on_io_done as a CPU IO engine option for that purpose.
Signed-off-by: Jens Axboe <axboe@fb.com>
[cpu] cpuchunks=int Split the load into cycles of the given time. In
microseconds.
[cpu] cpuchunks=int Split the load into cycles of the given time. In
microseconds.
+[cpu] exit_on_io_done=bool Detect when IO threads are done, then exit.
+
[netsplice] hostname=str
[net] hostname=str The host name or IP address to use for TCP or UDP based IO.
If the job is a TCP listener or UDP reader, the hostname is not
[netsplice] hostname=str
[net] hostname=str The host name or IP address to use for TCP or UDP based IO.
If the job is a TCP listener or UDP reader, the hostname is not
struct thread_data *td;
unsigned int cpuload;
unsigned int cpucycle;
struct thread_data *td;
unsigned int cpuload;
unsigned int cpucycle;
+ unsigned int exit_io_done;
};
static struct fio_option options[] = {
};
static struct fio_option options[] = {
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_INVALID,
},
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "exit_on_io_done",
+ .lname = "Exit when IO threads are done",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct cpu_options, exit_io_done),
+ .help = "Exit when IO threads finish",
+ .def = "0",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_INVALID,
+ },
{
struct cpu_options *co = td->eo;
{
struct cpu_options *co = td->eo;
+ if (co->exit_io_done && !fio_running_or_pending_io_threads()) {
+ td->done = 1;
+ return FIO_Q_BUSY;
+ }
+
usec_spin(co->cpucycle);
return FIO_Q_COMPLETED;
}
usec_spin(co->cpucycle);
return FIO_Q_COMPLETED;
}
.BI ioscheduler \fR=\fPstr
Attempt to switch the device hosting the file to the specified I/O scheduler.
.TP
.BI ioscheduler \fR=\fPstr
Attempt to switch the device hosting the file to the specified I/O scheduler.
.TP
-.BI cpuload \fR=\fPint
-If the job is a CPU cycle-eater, attempt to use the specified percentage of
-CPU cycles.
-.TP
-.BI cpuchunks \fR=\fPint
-If the job is a CPU cycle-eater, split the load into cycles of the
-given time in milliseconds.
-.TP
.BI disk_util \fR=\fPbool
Generate disk utilization statistics if the platform supports it. Default: true.
.TP
.BI disk_util \fR=\fPbool
Generate disk utilization statistics if the platform supports it. Default: true.
.TP
.BI (cpu)cpuchunks \fR=\fPint
Split the load into cycles of the given time. In microseconds.
.TP
.BI (cpu)cpuchunks \fR=\fPint
Split the load into cycles of the given time. In microseconds.
.TP
+.BI (cpu)exit_on_io_done \fR=\fPbool
+Detect when IO threads are done, then exit.
+.TP
.BI (libaio)userspace_reap
Normally, with the libaio engine in use, fio will use
the io_getevents system call to reap newly returned events.
.BI (libaio)userspace_reap
Normally, with the libaio engine in use, fio will use
the io_getevents system call to reap newly returned events.
TD_F_VER_NONE = 32,
TD_F_PROFILE_OPS = 64,
TD_F_COMPRESS = 128,
TD_F_VER_NONE = 32,
TD_F_PROFILE_OPS = 64,
TD_F_COMPRESS = 128,
extern char *num2str(unsigned long, int, int, int, int);
extern int ioengine_load(struct thread_data *);
extern int parse_dryrun(void);
extern char *num2str(unsigned long, int, int, int, int);
extern int ioengine_load(struct thread_data *);
extern int parse_dryrun(void);
+extern int fio_running_or_pending_io_threads(void);
extern uintptr_t page_mask;
extern uintptr_t page_size;
extern uintptr_t page_mask;
extern uintptr_t page_size;
+ if (!ret && (td->io_ops->flags & FIO_NOIO))
+ td->flags |= TD_F_NOIO;
+
+/*
+ * Return 1 if any IO-performing thread is still running or has not yet
+ * exited; return 0 once all IO threads are done.
+ *
+ * Threads flagged TD_F_NOIO (set for FIO_NOIO engines such as the CPU
+ * burner) are skipped, so CPU-only jobs do not keep each other alive
+ * while waiting for the real IO jobs to finish.
+ */
+int fio_running_or_pending_io_threads(void)
+{
+ struct thread_data *td;
+ int i;
+
+ for_each_td(td, i) {
+ if (td->flags & TD_F_NOIO)
+ continue;
+ /* anything before TD_EXITED still counts as running/pending */
+ if (td->runstate < TD_EXITED)
+ return 1;
+ }
+
+ return 0;
+}
+
static int endian_check(void)
{
union {
static int endian_check(void)
{
union {