exec_prerun=x Run 'x' before job io is begun.
exec_postrun=x Run 'x' after job io has finished.
ioscheduler=x Use ioscheduler 'x' for this job.
+ cpuload=x For a CPU io thread, percentage of CPU time to attempt
+ to burn.
+ cpuchunks=x Split burn cycles into pieces of x.
Examples using a job file
td_set_runstate(td, TD_RUNNING);
}
+/*
+ * Not really an io thread, all it does is burn CPU cycles in the specified
+ * manner.  Alternates busy-spins (__usec_sleep, the busy-looping variant)
+ * with real sleeps (usec_sleep) in fixed 10000-usec quanta: one spin out
+ * of every 'split' iterations yields roughly cpuload percent of CPU time.
+ * fio_cpuio_init() guarantees 1 <= cpuload <= 100, so the division below
+ * cannot fault.
+ * NOTE(review): td->cpucycle (cpuchunks option) is parsed but not used
+ * here — the quantum is hard-coded at 10000 usec; confirm intended.
+ */
+static void do_cpuio(struct thread_data *td)
+{
+	struct timeval e;
+	int split = 100 / td->cpuload;	/* spin on every split'th iteration */
+	int i = 0;
+
+	while (!td->terminate) {
+		gettimeofday(&e, NULL);
+
+		/* honor the job's runtime limit */
+		if (runtime_exceeded(td, &e))
+			break;
+
+		/* burn one quantum busy-looping, sleep the rest away */
+		if (!(i % split))
+			__usec_sleep(10000);
+		else
+			usec_sleep(td, 10000);
+
+		i++;
+	}
+}
+
/*
* Main IO worker function. It retrieves io_u's to process and queues
* and reaps them, checking for rate and errors along the way.
return fio_sgio_init(td);
else if (td->io_engine == FIO_SPLICEIO)
return fio_spliceio_init(td);
+ else if (td->io_engine == FIO_CPUIO)
+ return fio_cpuio_init(td);
else {
log_err("bad io_engine %d\n", td->io_engine);
return 1;
int i, max_units;
char *p;
+ if (td->io_engine == FIO_CPUIO)
+ return 0;
+
if (td->io_engine & FIO_SYNCIO)
max_units = 1;
else
struct stat st;
int flags = 0;
+ if (td->io_engine == FIO_CPUIO)
+ return 0;
+
if (stat(td->file_name, &st) == -1) {
if (errno != ENOENT) {
td_verror(td, errno);
clear_io_state(td);
prune_io_piece_log(td);
- do_io(td);
+ if (td->io_engine == FIO_CPUIO)
+ do_cpuio(td);
+ else
+ do_io(td);
td->runtime[td->ddir] += mtime_since_now(&td->start);
if (td_rw(td) && td->io_bytes[td->ddir ^ 1])
*/
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
-	int i;
+	int i, cputhreads;
/*
* reap exited threads (TD_EXITED -> TD_REAPED)
*/
-	for (i = 0; i < thread_number; i++) {
+	for (i = 0, cputhreads = 0; i < thread_number; i++) {
struct thread_data *td = &threads[i];
+		/* count CPU-burner threads so a pure-cpu run can be shut down */
+		if (td->io_engine == FIO_CPUIO)
+			cputhreads++;
+
if (td->runstate != TD_EXITED)
continue;
(*m_rate) -= td->ratemin;
(*t_rate) -= td->rate;
}
+
+	/*
+	 * cpu burners never finish on their own; when they are all that is
+	 * left running, terminate everything so the run can end.
+	 */
+	if (*nr_running == cputhreads)
+		terminate_threads(TERMINATE_ALL);
}
static void fio_unpin_memory(void *pinned)
FIO_POSIXAIO = 1 << 3,
FIO_SGIO = 1 << 4,
FIO_SPLICEIO = 1 << 5 | FIO_SYNCIO,
+ FIO_CPUIO = 1 << 6,
};
/*
unsigned long *file_map;
unsigned int num_maps;
+ /*
+ * CPU "io" cycle burner
+ */
+ unsigned int cpuload;
+ unsigned int cpucycle;
+
/*
* bandwidth and latency stats
*/
struct list_head io_log_list;
};
-#define td_verror(td, err) \
+#define __td_verror(td, err, msg) \
do { \
int e = (err); \
(td)->error = e; \
-	snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, error=%s", __FILE__, __LINE__, strerror(e)); \
+	snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, error=%s", __FILE__, __LINE__, (msg)); \
} while (0)
+
+/*
+ * Record an error on a thread: __td_verror() stores the code in td->error
+ * and formats file/line plus a message string into td->verror.
+ * td_verror() keeps the old behaviour (message is strerror of the code);
+ * td_vmsg() lets the caller supply a custom message instead.
+ */
+#define td_verror(td, err) __td_verror((td), (err), strerror((err)))
+#define td_vmsg(td, err, msg) __td_verror((td), (err), (msg))
+
extern struct io_u *__get_io_u(struct thread_data *);
extern void put_io_u(struct thread_data *, struct io_u *);
extern unsigned long mtime_since(struct timeval *, struct timeval *);
extern unsigned long mtime_since_now(struct timeval *);
extern unsigned long time_since_now(struct timeval *);
+extern void __usec_sleep(unsigned int);
extern void usec_sleep(struct thread_data *, unsigned long);
extern void rate_throttle(struct thread_data *, unsigned long, unsigned int);
ddir = td->ddir + (!td->sequential << 1) + (td->iomix << 2);
if (!terse_output) {
- if (!job_add_num)
- fprintf(f_out, "%s: (g=%d): rw=%s, odir=%d, bs=%d-%d, rate=%d, ioengine=%s, iodepth=%d\n", td->name, td->groupid, ddir_str[ddir], td->odirect, td->min_bs, td->max_bs, td->rate, td->io_engine_name, td->iodepth);
- else if (job_add_num == 1)
+ if (!job_add_num) {
+ if (td->io_engine == FIO_CPUIO)
+ fprintf(f_out, "%s: ioengine=cpu, cpuload=%u, cpucycle=%u\n", td->name, td->cpuload, td->cpucycle);
+ else
+ fprintf(f_out, "%s: (g=%d): rw=%s, odir=%d, bs=%d-%d, rate=%d, ioengine=%s, iodepth=%d\n", td->name, td->groupid, ddir_str[ddir], td->odirect, td->min_bs, td->max_bs, td->rate, td->io_engine_name, td->iodepth);
+ } else if (job_add_num == 1)
fprintf(f_out, "...\n");
}
strcpy(td->io_engine_name, "splice");
td->io_engine = FIO_SPLICEIO;
return 0;
+ } else if (!strncmp(str, "cpu", 3)) {
+ strcpy(td->io_engine_name, "cpu");
+ td->io_engine = FIO_CPUIO;
+ return 0;
}
- log_err("fio: ioengine: { linuxaio, aio, libaio }, posixaio, sync, mmap, sgio, splice\n");
+ log_err("fio: ioengine: { linuxaio, aio, libaio }, posixaio, sync, mmap, sgio, splice, cpu\n");
return 1;
}
fgetpos(f, &off);
continue;
}
+ if (!check_int(p, "cpuload", &td->cpuload)) {
+ fgetpos(f, &off);
+ continue;
+ }
+ if (!check_int(p, "cpuchunks", &td->cpucycle)) {
+ fgetpos(f, &off);
+ continue;
+ }
if (!check_int(p, "thinktime", &td->thinktime)) {
fgetpos(f, &off);
continue;
}
#endif /* FIO_HAVE_SPLICE */
+
+/*
+ * Init for the cpu "io engine": validates the cpuload percentage and
+ * clears file/iolog state that does not apply to a cycle burner.
+ * Returns 0 on success, 1 if cpuload was not set.
+ */
+int fio_cpuio_init(struct thread_data *td)
+{
+	if (!td->cpuload) {
+		td_vmsg(td, EINVAL, "cpu thread needs cpuload (1-100)");
+		return 1;
+	} else if (td->cpuload > 100)
+		td->cpuload = 100;	/* clamp so 100 / cpuload stays >= 1 */
+
+	/* no file io is performed, so drop iolog modes and the fd */
+	td->read_iolog = td->write_iolog = 0;
+	td->fd = -1;
+
+	return 0;
+}
extern int fio_mmapio_init(struct thread_data *);
extern int fio_sgio_init(struct thread_data *);
extern int fio_spliceio_init(struct thread_data *);
+extern int fio_cpuio_init(struct thread_data *);
#endif
/*
* busy looping version for the last few usec
*/
-static void __usec_sleep(unsigned int usec)
+void __usec_sleep(unsigned int usec)
{
struct timeval start;