#include "profile.h"
#include "lib/rand.h"
#include "memalign.h"
+#include "server.h"
unsigned long page_mask;
unsigned long page_size;
int temp_stall_ts;
unsigned long done_secs = 0;
+/*
+ * Just expose an empty list, if the OS does not support disk util stats
+ */
+#ifndef FIO_HAVE_DISK_UTIL
+FLIST_HEAD(disk_list);
+#endif
+
static struct fio_mutex *startup_mutex;
static struct fio_mutex *writeout_mutex;
static volatile int fio_abort;
static struct flist_head *cgroup_list;
static char *cgroup_mnt;
+unsigned long arch_flags = 0;
+
struct io_log *agg_io_log[2];
-#define TERMINATE_ALL (-1)
#define JOB_START_TIMEOUT (5 * 1000)
/*
 * Display names for the OS detection enum, indexed directly by the
 * enum value — entry order must track the os enum declaration that
 * defines os_nr (presumably os/os.h — verify). Index 0 ("Invalid")
 * is the placeholder for an unrecognized OS.
 */
static const char *fio_os_strings[os_nr] = {
	"Invalid",
	"Linux",
	"AIX",
	"FreeBSD",
	"HP-UX",
	"OSX",
	"NetBSD",
	"Solaris",
	"Windows"
};
+
/*
 * Display names for the architecture detection enum, indexed directly
 * by the enum value — entry order must track the arch enum declaration
 * that defines arch_nr (presumably arch/arch.h — verify). Index 0
 * ("Invalid") is the placeholder for an unrecognized architecture.
 */
static const char *fio_arch_strings[arch_nr] = {
	"Invalid",
	"x86-64",
	"x86",
	"ppc",
	"ia64",
	"s390",
	"alpha",
	"sparc",
	"sparc64",
	"arm",
	"sh",
	"hppa",
	"generic"
};
+
+const char *fio_get_os_string(int nr)
+{
+ if (nr < os_nr)
+ return fio_os_strings[nr];
+
+ return NULL;
+}
+
+const char *fio_get_arch_string(int nr)
+{
+ if (nr < arch_nr)
+ return fio_arch_strings[nr];
+
+ return NULL;
+}
+
void td_set_runstate(struct thread_data *td, int runstate)
{
	/*
	 * NOTE(review): as shown here the body only self-assigns when the
	 * state is already equal, which is a no-op — this looks like a
	 * truncated diff-context excerpt (the usual shape is an early
	 * return when unchanged, followed by the assignment). Confirm
	 * against the full source before relying on this listing.
	 */
	if (td->runstate == runstate)
		td->runstate = runstate;
}
-static void terminate_threads(int group_id)
+void fio_terminate_threads(int group_id)
{
struct thread_data *td;
int i;
/*
* if the thread is running, just let it exit
*/
- if (td->runstate < TD_RUNNING)
+ if (!td->pid)
+ continue;
+ else if (td->runstate < TD_RAMP)
kill(td->pid, SIGTERM);
else {
struct ioengine_ops *ops = td->io_ops;
static void sig_int(int sig)
{
if (threads) {
- log_info("\nfio: terminating on signal %d\n", sig);
- fflush(stdout);
- exit_value = 128;
- terminate_threads(TERMINATE_ALL);
+ if (is_backend)
+ fio_server_got_signal(sig);
+ else {
+ log_info("\nfio: terminating on signal %d\n", sig);
+ fflush(stdout);
+ exit_value = 128;
+ }
+
+ fio_terminate_threads(TERMINATE_ALL);
}
}
if (!threads)
break;
update_io_ticks();
- print_thread_status();
+
+ if (!is_backend)
+ print_thread_status();
}
return NULL;
act.sa_handler = sig_int;
act.sa_flags = SA_RESTART;
sigaction(SIGTERM, &act, NULL);
+
+ if (is_backend) {
+ memset(&act, 0, sizeof(act));
+ act.sa_handler = sig_int;
+ act.sa_flags = SA_RESTART;
+ sigaction(SIGPIPE, &act, NULL);
+ }
}
/*
if (mtime_since(&td->start, now) < 2000)
return 0;
- iops += td->io_blocks[ddir];
+ iops += td->this_io_blocks[ddir];
bytes += td->this_io_bytes[ddir];
ratemin += td->o.ratemin[ddir];
rate_iops += td->o.rate_iops[ddir];
return 0;
}
/*
 * Unconditionally refresh the thread's cached time-of-day value.
 * update_tv_cache() is the rate-limited wrapper that only calls this
 * once every tv_cache_mask+1 invocations; callers that must have a
 * fresh timestamp (e.g. re-checking runtime_exceeded()) call this
 * directly.
 */
static inline void __update_tv_cache(struct thread_data *td)
{
	fio_gettime(&td->tv_cache, NULL);
}
+
static inline void update_tv_cache(struct thread_data *td)
{
if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
- fio_gettime(&td->tv_cache, NULL);
+ __update_tv_cache(td);
}
static int break_on_this_error(struct thread_data *td, int *retptr)
update_tv_cache(td);
if (runtime_exceeded(td, &td->tv_cache)) {
- td->terminate = 1;
- break;
+ __update_tv_cache(td);
+ if (runtime_exceeded(td, &td->tv_cache)) {
+ td->terminate = 1;
+ break;
+ }
}
io_u = __get_io_u(td);
/*
* if we can queue more, do so. but check if there are
- * completed io_u's first.
+ * completed io_u's first. Note that we can get BUSY even
+ * without IO queued, if the system is resource starved.
*/
- full = queue_full(td) || ret == FIO_Q_BUSY;
+ full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete) {
min_events = min(td->o.iodepth_batch_complete,
td->cur_depth);
- if (full && !min_events)
+ if (full && !min_events && td->o.iodepth_batch_complete != 0)
min_events = 1;
do {
update_tv_cache(td);
if (runtime_exceeded(td, &td->tv_cache)) {
- td->terminate = 1;
- break;
+ __update_tv_cache(td);
+ if (runtime_exceeded(td, &td->tv_cache)) {
+ td->terminate = 1;
+ break;
+ }
}
io_u = get_io_u(td);
break;
/*
- * See if we need to complete some commands
+ * See if we need to complete some commands. Note that we
+ * can get BUSY even without IO queued, if the system is
+ * resource starved.
*/
- full = queue_full(td) || ret == FIO_Q_BUSY;
+ full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
if (full || !td->o.iodepth_batch_complete) {
min_evts = min(td->o.iodepth_batch_complete,
td->cur_depth);
- if (full && !min_evts)
+ if (full && !min_evts && td->o.iodepth_batch_complete != 0)
min_evts = 1;
if (__should_check_rate(td, 0) ||
if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
if (check_min_rate(td, &comp_time, bytes_done)) {
if (exitall_on_terminate)
- terminate_threads(td->groupid);
+ fio_terminate_threads(td->groupid);
td_verror(td, EIO, "check_min_rate");
break;
}
io_u->buf = p + max_bs * i;
dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);
- if (td_write(td) && !td->o.refill_buffers)
+ if (td_write(td))
io_u_fill_buffer(td, io_u, max_bs);
- else if (td_write(td) && td->o.verify_pattern_bytes) {
+ if (td_write(td) && td->o.verify_pattern_bytes) {
/*
* Fill the buffer with the pattern if we are
* going to be doing writes.
static void reset_io_counters(struct thread_data *td)
{
- td->ts.stat_io_bytes[0] = td->ts.stat_io_bytes[1] = 0;
+ td->stat_io_bytes[0] = td->stat_io_bytes[1] = 0;
td->this_io_bytes[0] = td->this_io_bytes[1] = 0;
+ td->stat_io_blocks[0] = td->stat_io_blocks[1] = 0;
+ td->this_io_blocks[0] = td->this_io_blocks[1] = 0;
td->zone_bytes = 0;
td->rate_bytes[0] = td->rate_bytes[1] = 0;
td->rate_blocks[0] = td->rate_blocks[1] = 0;
*/
if (td->o.time_based || td->o.loops)
td->nr_done_files = 0;
-
- /*
- * Set the same seed to get repeatable runs
- */
- td_fill_rand_seeds(td);
}
void reset_all_stats(struct thread_data *td)
close_files(td);
for_each_file(td, f, i)
fio_file_clear_done(f);
+
+ /*
+ * Set the same seed to get repeatable runs
+ */
+ td_fill_rand_seeds(td);
}
static int exec_string(const char *string)
pthread_condattr_t attr;
int clear_state;
- if (!td->o.use_thread)
+ if (!td->o.use_thread) {
setsid();
-
- td->pid = getpid();
+ td->pid = getpid();
+ } else
+ td->pid = gettid();
dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
goto err;
}
+ /*
+ * If we have a gettimeofday() thread, make sure we exclude that
+ * thread from this job
+ */
+ if (td->o.gtod_cpu)
+ fio_cpu_clear(&td->o.cpumask, td->o.gtod_cpu);
+
+ /*
+ * Set affinity first, in case it has an impact on the memory
+ * allocations.
+ */
+ if (td->o.cpumask_set && fio_setaffinity(td->pid, td->o.cpumask) == -1) {
+ td_verror(td, errno, "cpu_set_affinity");
+ goto err;
+ }
+
/*
* May alter parameters that init_io_u() will use, so we need to
* do this first.
if (td->o.verify_async && verify_async_init(td))
goto err;
- if (td->o.cpumask_set && fio_setaffinity(td->pid, td->o.cpumask) == -1) {
- td_verror(td, errno, "cpu_set_affinity");
- goto err;
- }
-
- /*
- * If we have a gettimeofday() thread, make sure we exclude that
- * thread from this job
- */
- if (td->o.gtod_cpu) {
- fio_cpu_clear(&td->o.cpumask, td->o.gtod_cpu);
- if (fio_setaffinity(td->pid, td->o.cpumask) == -1) {
- td_verror(td, errno, "cpu_set_affinity");
- goto err;
- }
- }
-
if (td->ioprio_set) {
if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
td_verror(td, errno, "ioprio_set");
}
fio_gettime(&td->epoch, NULL);
- getrusage(RUSAGE_SELF, &td->ts.ru_start);
+ getrusage(RUSAGE_SELF, &td->ru_start);
clear_state = 0;
while (keep_running(td)) {
fio_gettime(&td->start, NULL);
- memcpy(&td->ts.stat_sample_time[0], &td->start,
- sizeof(td->start));
- memcpy(&td->ts.stat_sample_time[1], &td->start,
- sizeof(td->start));
+ memcpy(&td->bw_sample_time, &td->start, sizeof(td->start));
+ memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
memcpy(&td->tv_cache, &td->start, sizeof(td->start));
if (td->o.ratemin[0] || td->o.ratemin[1])
- memcpy(&td->lastrate, &td->ts.stat_sample_time,
+ memcpy(&td->lastrate, &td->bw_sample_time,
sizeof(td->lastrate));
if (clear_state)
td->ts.io_bytes[1] = td->io_bytes[1];
fio_mutex_down(writeout_mutex);
- if (td->ts.bw_log) {
+ if (td->bw_log) {
if (td->o.bw_log_file) {
- finish_log_named(td, td->ts.bw_log,
+ finish_log_named(td, td->bw_log,
td->o.bw_log_file, "bw");
} else
- finish_log(td, td->ts.bw_log, "bw");
+ finish_log(td, td->bw_log, "bw");
}
- if (td->ts.lat_log) {
+ if (td->lat_log) {
if (td->o.lat_log_file) {
- finish_log_named(td, td->ts.lat_log,
+ finish_log_named(td, td->lat_log,
td->o.lat_log_file, "lat");
} else
- finish_log(td, td->ts.lat_log, "lat");
+ finish_log(td, td->lat_log, "lat");
}
- if (td->ts.slat_log) {
+ if (td->slat_log) {
if (td->o.lat_log_file) {
- finish_log_named(td, td->ts.slat_log,
+ finish_log_named(td, td->slat_log,
td->o.lat_log_file, "slat");
} else
- finish_log(td, td->ts.slat_log, "slat");
+ finish_log(td, td->slat_log, "slat");
}
- if (td->ts.clat_log) {
+ if (td->clat_log) {
if (td->o.lat_log_file) {
- finish_log_named(td, td->ts.clat_log,
+ finish_log_named(td, td->clat_log,
td->o.lat_log_file, "clat");
} else
- finish_log(td, td->ts.clat_log, "clat");
+ finish_log(td, td->clat_log, "clat");
+ }
+ if (td->iops_log) {
+ if (td->o.iops_log_file) {
+ finish_log_named(td, td->iops_log,
+ td->o.iops_log_file, "iops");
+ } else
+ finish_log(td, td->iops_log, "iops");
}
+
fio_mutex_up(writeout_mutex);
if (td->o.exec_postrun)
exec_string(td->o.exec_postrun);
if (exitall_on_terminate)
- terminate_threads(td->groupid);
+ fio_terminate_threads(td->groupid);
err:
if (td->error)
if (td->o.write_iolog_file)
write_iolog_close(td);
- options_mem_free(td);
td_set_runstate(td, TD_EXITED);
return (void *) (unsigned long) td->error;
}
struct thread_data *td;
void *data, *ret;
+#ifndef __hpux
data = shmat(shmid, NULL, 0);
if (data == (void *) -1) {
int __err = errno;
perror("shmat");
return __err;
}
+#else
+ /*
+ * HP-UX inherits shm mappings?
+ */
+ data = threads;
+#endif
td = data + offset * sizeof(struct thread_data);
ret = thread_main(td);
}
if (*nr_running == cputhreads && !pending && realthreads)
- terminate_threads(TERMINATE_ALL);
+ fio_terminate_threads(TERMINATE_ALL);
}
static void *gtod_thread_main(void *data)
if (fio_gtod_offload && fio_start_gtod_thread())
return;
+ set_sig_handlers();
+
if (!terse_output) {
log_info("Starting ");
if (nr_thread)
fflush(stdout);
}
- set_sig_handlers();
-
todo = thread_number;
nr_running = 0;
nr_started = 0;
for_each_td(td, i) {
print_status_init(td->thread_number - 1);
- if (!td->o.create_serialize) {
- init_disk_util(td);
+ if (!td->o.create_serialize)
continue;
- }
/*
* do file setup here so it happens sequentially,
td_io_close_file(td, f);
}
}
-
- init_disk_util(td);
}
set_genesis_time();
break;
}
+ init_disk_util(td);
+
/*
* Set state to created. Thread will transition
* to TD_INITIALIZED when it's done setting up.
dprint(FD_MUTEX, "wait on startup_mutex\n");
if (fio_mutex_down_timeout(startup_mutex, 10)) {
log_err("fio: job startup hung? exiting.\n");
- terminate_threads(TERMINATE_ALL);
+ fio_terminate_threads(TERMINATE_ALL);
fio_abort = 1;
nr_started--;
break;
reap_threads(&nr_running, &t_rate, &m_rate);
- if (todo)
- usleep(100000);
+ if (todo) {
+ if (is_backend)
+ fio_server_idle_loop();
+ else
+ usleep(100000);
+ }
}
while (nr_running) {
reap_threads(&nr_running, &t_rate, &m_rate);
- usleep(10000);
+
+ if (is_backend)
+ fio_server_idle_loop();
+ else
+ usleep(10000);
}
update_io_ticks();
fio_unpin_memory();
}
-int main(int argc, char *argv[])
+int exec_run(void)
{
- long ps;
-
- sinit();
- init_rand(&__fio_rand_state);
-
- /*
- * We need locale for number printing, if it isn't set then just
- * go with the US format.
- */
- if (!getenv("LC_NUMERIC"))
- setlocale(LC_NUMERIC, "en_US");
+ struct thread_data *td;
+ int i;
- ps = sysconf(_SC_PAGESIZE);
- if (ps < 0) {
- log_err("Failed to get page size\n");
- return 1;
+ if (nr_clients)
+ return fio_handle_clients();
+ if (exec_profile) {
+ if (load_profile(exec_profile))
+ return 1;
+ free(exec_profile);
+ exec_profile = NULL;
}
-
- page_size = ps;
- page_mask = ps - 1;
-
- fio_keywords_init();
-
- if (parse_options(argc, argv))
- return 1;
-
- if (exec_profile && load_profile(exec_profile))
- return 1;
-
if (!thread_number)
return 0;
}
}
+ for_each_td(td, i)
+ fio_options_free(td);
+
cgroup_kill(cgroup_list);
sfree(cgroup_list);
sfree(cgroup_mnt);
fio_mutex_remove(writeout_mutex);
return exit_value;
}
+
/*
 * Reset the global job bookkeeping counters so a fresh set of jobs can
 * be parsed and run within the same process.
 * NOTE(review): presumably used by the client/server backend between
 * connections — confirm against the callers; also verify no other
 * globals (e.g. exit_value) need resetting here.
 */
void reset_fio_state(void)
{
	groupid = 0;
	thread_number = 0;
	nr_process = 0;
	nr_thread = 0;
	done_secs = 0;
}
+
/*
 * Sanity-check that the endianness fio was built for matches what we
 * detect at runtime by probing a 64-bit value through a byte view.
 *
 * Returns 0 when build-time and runtime byte order agree, 1 on any
 * mismatch — including the cases where neither FIO_LITTLE_ENDIAN nor
 * FIO_BIG_ENDIAN was defined at build time, or neither byte order
 * could be detected at runtime.
 */
static int endian_check(void)
{
	union {
		uint8_t c[8];
		uint64_t v;
	} probe;
	int little = 0, big = 0;

	probe.v = 0x12;
	if (probe.c[7] == 0x12)
		big = 1;
	else if (probe.c[0] == 0x12)
		little = 1;

#if defined(FIO_LITTLE_ENDIAN)
	if (big)
		return 1;
#elif defined(FIO_BIG_ENDIAN)
	if (little)
		return 1;
#else
	return 1;
#endif

	if (!little && !big)
		return 1;

	return 0;
}
+
+int main(int argc, char *argv[], char *envp[])
+{
+ long ps;
+
+ if (endian_check()) {
+ log_err("fio: endianness settings appear wrong.\n");
+ log_err("fio: please report this to fio@vger.kernel.org\n");
+ return 1;
+ }
+
+ arch_init(envp);
+
+ sinit();
+
+ /*
+ * We need locale for number printing, if it isn't set then just
+ * go with the US format.
+ */
+ if (!getenv("LC_NUMERIC"))
+ setlocale(LC_NUMERIC, "en_US");
+
+ ps = sysconf(_SC_PAGESIZE);
+ if (ps < 0) {
+ log_err("Failed to get page size\n");
+ return 1;
+ }
+
+ page_size = ps;
+ page_mask = ps - 1;
+
+ fio_keywords_init();
+
+ if (parse_options(argc, argv))
+ return 1;
+
+ return exec_run();
+}