#include <sys/stat.h>
#include <sys/wait.h>
#include <math.h>
+#include <pthread.h>
#include "fio.h"
#include "smalloc.h"
#include "rate-submit.h"
#include "helper_thread.h"
#include "pshared.h"
+#include "zone-dist.h"
static struct fio_sem *startup_sem;
static struct flist_head *cgroup_list;
-static char *cgroup_mnt;
+static struct cgroup_mnt *cgroup_mnt;
static int exit_value;
-static volatile int fio_abort;
+static volatile bool fio_abort;
static unsigned int nr_process = 0;
static unsigned int nr_thread = 0;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;
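+/* guards offload-mode overlap checking against concurrent job cleanup */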
+pthread_mutex_t overlap_check = PTHREAD_MUTEX_INITIALIZER;
#define JOB_START_TIMEOUT (5 * 1000)
exit_value = 128;
}
- fio_terminate_threads(TERMINATE_ALL);
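+	/* the second argument selects which jobs to stop; TERMINATE_ALL stops them all */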
+ fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
}
}
unsigned long long bytes = 0;
unsigned long iops = 0;
unsigned long spent;
- unsigned long rate;
- unsigned int ratemin = 0;
+ unsigned long long rate;
+ unsigned long long ratemin = 0;
unsigned int rate_iops = 0;
unsigned int rate_iops_min = 0;
* check bandwidth specified rate
*/
if (bytes < td->rate_bytes[ddir]) {
- log_err("%s: rate_min=%uB/s not met, only transferred %lluB\n",
+ log_err("%s: rate_min=%lluB/s not met, only transferred %lluB\n",
td->o.name, ratemin, bytes);
return true;
} else {
if (rate < ratemin ||
bytes < td->rate_bytes[ddir]) {
- log_err("%s: rate_min=%uB/s not met, got %luB/s\n",
+ log_err("%s: rate_min=%lluB/s not met, got %lluB/s\n",
td->o.name, ratemin, rate);
return true;
}
if (rate < rate_iops_min ||
iops < td->rate_blocks[ddir]) {
- log_err("%s: rate_iops_min=%u not met, got %lu IOPS\n",
+ log_err("%s: rate_iops_min=%u not met, got %llu IOPS\n",
td->o.name, rate_iops_min, rate);
return true;
}
* get immediately available events, if any
*/
r = io_u_queued_complete(td, 0);
- if (r < 0)
- return;
/*
* now cancel remaining active events
static bool fio_io_sync(struct thread_data *td, struct fio_file *f)
{
struct io_u *io_u = __get_io_u(td);
- int ret;
+ enum fio_q_status ret;
if (!io_u)
return true;
io_u->ddir = DDIR_SYNC;
io_u->file = f;
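+	/* do not drop the file reference when this io_u is put; the caller manages f */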
+ io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
if (td_io_prep(td, io_u)) {
put_io_u(td, io_u);
requeue:
ret = td_io_queue(td, io_u);
- if (ret < 0) {
- td_verror(td, io_u->error, "td_io_queue");
- put_io_u(td, io_u);
- return true;
- } else if (ret == FIO_Q_QUEUED) {
- if (td_io_commit(td))
- return true;
+ switch (ret) {
+ case FIO_Q_QUEUED:
+ td_io_commit(td);
if (io_u_queued_complete(td, 1) < 0)
return true;
- } else if (ret == FIO_Q_COMPLETED) {
+ break;
+ case FIO_Q_COMPLETED:
if (io_u->error) {
td_verror(td, io_u->error, "td_io_queue");
return true;
if (io_u_sync_complete(td, io_u) < 0)
return true;
- } else if (ret == FIO_Q_BUSY) {
- if (td_io_commit(td))
- return true;
+ break;
+ case FIO_Q_BUSY:
+ td_io_commit(td);
goto requeue;
}
static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
{
- int ret;
+ int ret, ret2;
if (fio_file_open(f))
return fio_io_sync(td, f);
return 1;
ret = fio_io_sync(td, f);
- td_io_close_file(td, f);
- return ret;
+ ret2 = 0;
+ if (fio_file_open(f))
+ ret2 = td_io_close_file(td, f);
+ return (ret || ret2);
}
static inline void __update_ts_cache(struct thread_data *td)
if ((full && !min_evts) || !td->o.iodepth_batch_complete_min)
min_evts = 1;
- if (time && (__should_check_rate(td, DDIR_READ) ||
- __should_check_rate(td, DDIR_WRITE) ||
- __should_check_rate(td, DDIR_TRIM)))
+ if (time && __should_check_rate(td))
fio_gettime(time, NULL);
do {
enum fio_ddir ddir, uint64_t *bytes_issued, int from_verify,
struct timespec *comp_time)
{
- int ret2;
-
switch (*ret) {
case FIO_Q_COMPLETED:
if (io_u->error) {
*ret = -io_u->error;
clear_io_u(td, io_u);
} else if (io_u->resid) {
- int bytes = io_u->xfer_buflen - io_u->resid;
+ long long bytes = io_u->xfer_buflen - io_u->resid;
struct fio_file *f = io_u->file;
if (bytes_issued)
*bytes_issued += bytes;
if (!from_verify)
- trim_io_piece(td, io_u);
+ trim_io_piece(io_u);
/*
* zero read, fail
requeue_io_u(td, &io_u);
} else {
sync_done:
- if (comp_time && (__should_check_rate(td, DDIR_READ) ||
- __should_check_rate(td, DDIR_WRITE) ||
- __should_check_rate(td, DDIR_TRIM)))
+ if (comp_time && __should_check_rate(td))
fio_gettime(comp_time, NULL);
*ret = io_u_sync_complete(td, io_u);
if (!from_verify)
unlog_io_piece(td, io_u);
requeue_io_u(td, &io_u);
- ret2 = td_io_commit(td);
- if (ret2 < 0)
- *ret = ret2;
+ td_io_commit(td);
break;
default:
assert(*ret < 0);
/*
* Check if io_u will overlap an in-flight IO in the queue
*/
-static bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
+bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u)
{
bool overlap;
struct io_u *check_io_u;
if (x1 < y2 && y1 < x2) {
overlap = true;
- dprint(FD_IO, "in-flight overlap: %llu/%lu, %llu/%lu\n",
+ dprint(FD_IO, "in-flight overlap: %llu/%llu, %llu/%llu\n",
x1, io_u->buflen,
y1, check_io_u->buflen);
break;
return overlap;
}
-static int io_u_submit(struct thread_data *td, struct io_u *io_u)
+static enum fio_q_status io_u_submit(struct thread_data *td, struct io_u *io_u)
{
/*
* Check for overlap if the user asked us to, and we have
break;
} else if (io_u->ddir == DDIR_WRITE) {
io_u->ddir = DDIR_READ;
+ populate_verify_io_u(td, io_u);
break;
} else {
put_io_u(td, io_u);
over = (usperop - total) / usperop * -bs;
td->rate_io_issue_bytes[ddir] += (missed - over);
+ /* adjust for rate_process=poisson */
+ td->last_usec[ddir] += total;
}
}
* Break if we exceeded the bytes. The exception is time
* based runs, but we still need to break out of the loop
* for those to run verification, if enabled.
+ * Jobs read from iolog do not use this stop condition.
*/
if (bytes_issued >= total_bytes &&
+ !td->o.read_iolog_file &&
(!td->o.time_based ||
(td->o.time_based && td->o.verify != VERIFY_NONE)))
break;
break;
}
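+	/* writes carry their verify pattern; fill it now that offset and length are set */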
+ if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY)
+ populate_verify_io_u(td, io_u);
+
ddir = io_u->ddir;
/*
if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
- if (!td->o.verify_pattern_bytes) {
- io_u->rand_seed = __rand(&td->verify_state);
- if (sizeof(int) != sizeof(long *))
- io_u->rand_seed *= __rand(&td->verify_state);
- }
-
if (verify_state_should_stop(td, io_u)) {
put_io_u(td, io_u);
break;
log_io_piece(td, io_u);
if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
- const unsigned long blen = io_u->xfer_buflen;
- const enum fio_ddir ddir = acct_ddir(io_u);
+ const unsigned long long blen = io_u->xfer_buflen;
+ const enum fio_ddir __ddir = acct_ddir(io_u);
if (td->error)
break;
workqueue_enqueue(&td->io_wq, &io_u->work);
ret = FIO_Q_QUEUED;
- if (ddir_rw(ddir)) {
- td->io_issues[ddir]++;
- td->io_issue_bytes[ddir] += blen;
- td->rate_io_issue_bytes[ddir] += blen;
+ if (ddir_rw(__ddir)) {
+ td->io_issues[__ddir]++;
+ td->io_issue_bytes[__ddir] += blen;
+ td->rate_io_issue_bytes[__ddir] += blen;
}
if (should_check_rate(td))
- td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
+ td->rate_next_io_time[__ddir] = usec_for_io(td, __ddir);
} else {
ret = io_u_submit(td, io_u);
if (!in_ramp_time(td) && should_check_rate(td)) {
if (check_min_rate(td, &comp_time)) {
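+			/* minimum rate not met: give up, optionally taking other jobs down too */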
if (exitall_on_terminate || td->o.exitall_error)
- fio_terminate_threads(td->groupid);
+ fio_terminate_threads(td->groupid, td->o.exit_what);
td_verror(td, EIO, "check_min_rate");
break;
}
td->error = 0;
}
- if (should_fsync(td) && td->o.end_fsync) {
+ if (should_fsync(td) && (td->o.end_fsync || td->o.fsync_on_close)) {
td_set_runstate(td, TD_FSYNCING);
for_each_file(td, f, i) {
if (td->io_ops->io_u_free)
td->io_ops->io_u_free(td, io_u);
- fio_memfree(io_u, sizeof(*io_u));
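+		/* io_us come from shared memory when offload overlap checking is enabled */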
+ fio_memfree(io_u, sizeof(*io_u), td_offload_overlap(td));
}
free_io_mem(td);
io_u_rexit(&td->io_u_requeues);
- io_u_qexit(&td->io_u_freelist);
- io_u_qexit(&td->io_u_all);
+ io_u_qexit(&td->io_u_freelist, false);
+ io_u_qexit(&td->io_u_all, td_offload_overlap(td));
free_file_completion_logging(td);
}
static int init_io_u(struct thread_data *td)
{
struct io_u *io_u;
- unsigned int max_bs, min_write;
int cl_align, i, max_units;
- int data_xfer = 1, err;
- char *p;
+ int err;
max_units = td->o.iodepth;
- max_bs = td_max_bs(td);
- min_write = td->o.min_bs[DDIR_WRITE];
- td->orig_buffer_size = (unsigned long long) max_bs
- * (unsigned long long) max_units;
-
- if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
- data_xfer = 0;
err = 0;
err += !io_u_rinit(&td->io_u_requeues, td->o.iodepth);
- err += !io_u_qinit(&td->io_u_freelist, td->o.iodepth);
- err += !io_u_qinit(&td->io_u_all, td->o.iodepth);
+ err += !io_u_qinit(&td->io_u_freelist, td->o.iodepth, false);
+ err += !io_u_qinit(&td->io_u_all, td->o.iodepth, td_offload_overlap(td));
if (err) {
log_err("fio: failed setting up IO queues\n");
return 1;
}
+ cl_align = os_cache_line_size();
+
+ for (i = 0; i < max_units; i++) {
+ void *ptr;
+
+ if (td->terminate)
+ return 1;
+
+ ptr = fio_memalign(cl_align, sizeof(*io_u), td_offload_overlap(td));
+ if (!ptr) {
+ log_err("fio: unable to allocate aligned memory\n");
+ return 1;
+ }
+
+ io_u = ptr;
+ memset(io_u, 0, sizeof(*io_u));
+ INIT_FLIST_HEAD(&io_u->verify_list);
+ dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
+
+ io_u->index = i;
+ io_u->flags = IO_U_F_FREE;
+ io_u_qpush(&td->io_u_freelist, io_u);
+
+ /*
+ * io_u never leaves this stack, used for iteration of all
+ * io_u buffers.
+ */
+ io_u_qpush(&td->io_u_all, io_u);
+
+ if (td->io_ops->io_u_init) {
+ int ret = td->io_ops->io_u_init(td, io_u);
+
+ if (ret) {
+ log_err("fio: failed to init engine data: %d\n", ret);
+ return 1;
+ }
+ }
+ }
+
+	if (init_io_u_buffers(td))
+		return 1;
+
+ if (init_file_completion_logging(td, max_units))
+ return 1;
+
+ return 0;
+}
+
+int init_io_u_buffers(struct thread_data *td)
+{
+ struct io_u *io_u;
+ unsigned long long max_bs, min_write;
+ int i, max_units;
+ int data_xfer = 1;
+ char *p;
+
+ max_units = td->o.iodepth;
+ max_bs = td_max_bs(td);
+ min_write = td->o.min_bs[DDIR_WRITE];
+ td->orig_buffer_size = (unsigned long long) max_bs
+ * (unsigned long long) max_units;
+
+ if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
+ data_xfer = 0;
+
/*
* if we may later need to do address alignment, then add any
* possible adjustment here so that we don't cause a buffer
td->orig_buffer_size += page_mask + td->o.mem_align;
if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
- unsigned long bs;
+ unsigned long long bs;
bs = td->orig_buffer_size + td->o.hugepage_size - 1;
td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
else
p = td->orig_buffer;
- cl_align = os_cache_line_size();
-
for (i = 0; i < max_units; i++) {
- void *ptr;
-
- if (td->terminate)
- return 1;
-
- ptr = fio_memalign(cl_align, sizeof(*io_u));
- if (!ptr) {
- log_err("fio: unable to allocate aligned memory\n");
- break;
- }
-
- io_u = ptr;
- memset(io_u, 0, sizeof(*io_u));
- INIT_FLIST_HEAD(&io_u->verify_list);
+ io_u = td->io_u_all.io_us[i];
dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
if (data_xfer) {
fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
}
}
-
- io_u->index = i;
- io_u->flags = IO_U_F_FREE;
- io_u_qpush(&td->io_u_freelist, io_u);
-
- /*
- * io_u never leaves this stack, used for iteration of all
- * io_u buffers.
- */
- io_u_qpush(&td->io_u_all, io_u);
-
- if (td->io_ops->io_u_init) {
- int ret = td->io_ops->io_u_init(td, io_u);
-
- if (ret) {
- log_err("fio: failed to init engine data: %d\n", ret);
- return 1;
- }
- }
-
p += max_bs;
}
- if (init_file_completion_logging(td, max_units))
- return 1;
-
return 0;
}
static int switch_ioscheduler(struct thread_data *td)
{
#ifdef FIO_HAVE_IOSCHED_SWITCH
- char tmp[256], tmp2[128];
+ char tmp[256], tmp2[128], *p;
FILE *f;
int ret;
/*
* Read back and check that the selected scheduler is now the default.
*/
- memset(tmp, 0, sizeof(tmp));
- ret = fread(tmp, sizeof(tmp), 1, f);
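+	/* read byte-wise so a short read still returns a count we can NUL-terminate */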
+ ret = fread(tmp, 1, sizeof(tmp) - 1, f);
if (ferror(f) || ret < 0) {
td_verror(td, errno, "fread");
fclose(f);
return 1;
}
+ tmp[ret] = '\0';
/*
- * either a list of io schedulers or "none\n" is expected.
+ * either a list of io schedulers or "none\n" is expected. Strip the
+ * trailing newline.
*/
- tmp[strlen(tmp) - 1] = '\0';
+ p = tmp;
+ strsep(&p, "\n");
/*
* Write to "none" entry doesn't fail, so check the result here.
static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
- size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
+ size_t newlen = strlen(string) + strlen(o->name) + strlen(mode) + 13 + 1;
int ret;
char *str;
str = malloc(newlen);
- sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);
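+	/* "&>" is a bashism and system() runs /bin/sh; use POSIX redirection instead */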
+ sprintf(str, "%s > %s.%s.txt 2>&1", string, o->name, mode);
log_info("%s : Saving output of %s in %s.%s.txt\n",o->name, mode, o->name, mode);
ret = system(str);
struct sk_out *sk_out = fd->sk_out;
uint64_t bytes_done[DDIR_RWDIR_CNT];
int deadlock_loop_cnt;
- bool clear_state, did_some_io;
+ bool clear_state;
int ret;
sk_out_assign(sk_out);
} else
td->pid = gettid();
- fio_local_clock_init(o->use_thread);
+ fio_local_clock_init();
dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);
INIT_FLIST_HEAD(&td->io_hist_list);
INIT_FLIST_HEAD(&td->verify_list);
INIT_FLIST_HEAD(&td->trim_list);
- INIT_FLIST_HEAD(&td->next_rand_list);
td->io_hist_tree = RB_ROOT;
ret = mutex_cond_init_pshared(&td->io_u_lock, &td->free_cond);
goto err;
}
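+	/* build the zone index tables used by zoned random distributions */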
+ td_zone_gen_index(td);
+
/*
* Do this early, we don't want the compress threads to be limited
* to the same CPUs as the IO workers. So do this before we set
* May alter parameters that init_io_u() will use, so we need to
* do this first.
*/
- if (init_iolog(td))
+ if (!init_iolog(td))
+ goto err;
+
+ if (td_io_init(td))
goto err;
if (init_io_u(td))
goto err;
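+	/* let engines finish setup that needs the allocated io_us (buffer registration and the like) */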
+ if (td->io_ops->post_init && td->io_ops->post_init(td))
+ goto err;
+
if (o->verify_async && verify_async_init(td))
goto err;
if (!o->create_serialize && setup_files(td))
goto err;
- if (td_io_init(td))
- goto err;
-
if (!init_random_map(td))
goto err;
memset(bytes_done, 0, sizeof(bytes_done));
clear_state = false;
- did_some_io = false;
while (keep_running(td)) {
uint64_t verify_bytes;
td_ioengine_flagged(td, FIO_UNIDIR))
continue;
- if (ddir_rw_sum(bytes_done))
- did_some_io = true;
-
clear_io_state(td, 0);
fio_gettime(&td->start, NULL);
}
/*
- * If td ended up with no I/O when it should have had,
- * then something went wrong unless FIO_NOIO or FIO_DISKLESSIO.
- * (Are we not missing other flags that can be ignored ?)
+ * Acquire this lock if we were doing overlap checking in
+ * offload mode so that we don't clean up this job while
+ * another thread is checking its io_u's for overlap
*/
- if ((td->o.size || td->o.io_size) && !ddir_rw_sum(bytes_done) &&
- !did_some_io && !td->o.create_only &&
- !(td_ioengine_flagged(td, FIO_NOIO) ||
- td_ioengine_flagged(td, FIO_DISKLESSIO)))
- log_err("%s: No I/O performed by %s, "
- "perhaps try --debug=io option for details?\n",
- td->o.name, td->io_ops->name);
-
+ if (td_offload_overlap(td))
+ pthread_mutex_lock(&overlap_check);
td_set_runstate(td, TD_FINISHING);
+ if (td_offload_overlap(td))
+ pthread_mutex_unlock(&overlap_check);
update_rusage_stat(td);
td->ts.total_run_time = mtime_since_now(&td->epoch);
exec_string(o, o->exec_postrun, (const char *)"postrun");
if (exitall_on_terminate || (o->exitall_error && td->error))
- fio_terminate_threads(td->groupid);
+ fio_terminate_threads(td->groupid, td->o.exit_what);
err:
if (td->error)
close_and_free_files(td);
cleanup_io_u(td);
close_ioengine(td);
- cgroup_shutdown(td, &cgroup_mnt);
+ cgroup_shutdown(td, cgroup_mnt);
verify_free_state(td);
-
- if (td->zone_state_index) {
- int i;
-
- for (i = 0; i < DDIR_RWDIR_CNT; i++)
- free(td->zone_state_index[i]);
- free(td->zone_state_index);
- td->zone_state_index = NULL;
- }
+ td_zone_free_index(td);
if (fio_option_is_set(o, cpumask)) {
ret = fio_cpuset_exit(&o->cpumask);
*/
if (o->write_iolog_file)
write_iolog_close(td);
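+	/* close the replay-side iolog file too, if one is still open */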
+ if (td->io_log_rfile)
+ fclose(td->io_log_rfile);
td_set_runstate(td, TD_EXITED);
}
if (*nr_running == cputhreads && !pending && realthreads)
- fio_terminate_threads(TERMINATE_ALL);
+ fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
}
static bool __check_trigger_file(void)
fio_clients_send_trigger(trigger_remote_cmd);
else {
verify_save_state(IO_LIST_ALL);
- fio_terminate_threads(TERMINATE_ALL);
+ fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
exec_trigger(trigger_cmd);
}
}
td->thread_number - 1, &data);
if (!ret)
verify_assign_state(td, data);
- } else
- ret = verify_load_state(td, "local");
+ } else {
+ char prefix[PATH_MAX];
+
+ if (aux_path)
+ sprintf(prefix, "%s%clocal", aux_path,
+ FIO_OS_PATH_SEPARATOR);
+ else
+ strcpy(prefix, "local");
+ ret = verify_load_state(td, prefix);
+ }
return ret;
}
}
if (output_format & FIO_OUTPUT_NORMAL) {
- log_info("Starting ");
+ struct buf_output out;
+
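+		/* assemble the whole banner first so it reaches the log in one piece */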
+ buf_output_init(&out);
+ __log_buf(&out, "Starting ");
if (nr_thread)
- log_info("%d thread%s", nr_thread,
+ __log_buf(&out, "%d thread%s", nr_thread,
nr_thread > 1 ? "s" : "");
if (nr_process) {
if (nr_thread)
- log_info(" and ");
- log_info("%d process%s", nr_process,
+ __log_buf(&out, " and ");
+ __log_buf(&out, "%d process%s", nr_process,
nr_process > 1 ? "es" : "");
}
- log_info("\n");
- log_info_flush();
+ __log_buf(&out, "\n");
+ log_info_buf(out.buf, out.buflen);
+ buf_output_free(&out);
}
todo = thread_number;
dprint(FD_MUTEX, "wait on startup_sem\n");
if (fio_sem_down_timeout(startup_sem, 10000)) {
log_err("fio: job startup hung? exiting.\n");
- fio_terminate_threads(TERMINATE_ALL);
- fio_abort = 1;
+ fio_terminate_threads(TERMINATE_ALL, TERMINATE_ALL);
+ fio_abort = true;
nr_started--;
free(fd);
break;
}
startup_sem = fio_sem_init(FIO_SEM_LOCKED);
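+	/* without a socket connection this process is the local, standalone backend */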
+ if (!sk_out)
+ is_local_backend = true;
if (startup_sem == NULL)
return 1;
set_genesis_time();
stat_init();
- helper_thread_create(startup_sem, sk_out);
+ if (helper_thread_create(startup_sem, sk_out))
+ log_err("fio: failed to create helper thread\n");
cgroup_list = smalloc(sizeof(*cgroup_list));
- INIT_FLIST_HEAD(cgroup_list);
+ if (cgroup_list)
+ INIT_FLIST_HEAD(cgroup_list);
run_threads(sk_out);
}
free_disk_util();
- cgroup_kill(cgroup_list);
- sfree(cgroup_list);
- sfree(cgroup_mnt);
+ if (cgroup_list) {
+ cgroup_kill(cgroup_list);
+ sfree(cgroup_list);
+ }
fio_sem_remove(startup_sem);
stat_exit();