#include "rate-submit.h"
#include "helper_thread.h"
#include "pshared.h"
+#include "zone-dist.h"
static struct fio_sem *startup_sem;
static struct flist_head *cgroup_list;
-static char *cgroup_mnt;
+static struct cgroup_mnt *cgroup_mnt;
static int exit_value;
static volatile int fio_abort;
static unsigned int nr_process = 0;
*ret = -io_u->error;
clear_io_u(td, io_u);
} else if (io_u->resid) {
- int bytes = io_u->xfer_buflen - io_u->resid;
+ long long bytes = io_u->xfer_buflen - io_u->resid;
struct fio_file *f = io_u->file;
if (bytes_issued)
if (x1 < y2 && y1 < x2) {
overlap = true;
- dprint(FD_IO, "in-flight overlap: %llu/%lu, %llu/%lu\n",
+ dprint(FD_IO, "in-flight overlap: %llu/%llu, %llu/%llu\n",
x1, io_u->buflen,
y1, check_io_u->buflen);
break;
* Break if we exceeded the bytes. The exception is time
* based runs, but we still need to break out of the loop
* for those to run verification, if enabled.
+ * Jobs read from iolog do not use this stop condition.
*/
if (bytes_issued >= total_bytes &&
+ !td->o.read_iolog_file &&
(!td->o.time_based ||
(td->o.time_based && td->o.verify != VERIFY_NONE)))
break;
log_io_piece(td, io_u);
if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
- const unsigned long blen = io_u->xfer_buflen;
+ const unsigned long long blen = io_u->xfer_buflen;
const enum fio_ddir __ddir = acct_ddir(io_u);
if (td->error)
static int init_io_u(struct thread_data *td)
{
struct io_u *io_u;
- unsigned int max_bs, min_write;
int cl_align, i, max_units;
- int data_xfer = 1, err;
- char *p;
+ int err;
max_units = td->o.iodepth;
- max_bs = td_max_bs(td);
- min_write = td->o.min_bs[DDIR_WRITE];
- td->orig_buffer_size = (unsigned long long) max_bs
- * (unsigned long long) max_units;
-
- if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
- data_xfer = 0;
err = 0;
err += !io_u_rinit(&td->io_u_requeues, td->o.iodepth);
return 1;
}
+ cl_align = os_cache_line_size();
+
+ for (i = 0; i < max_units; i++) {
+ void *ptr;
+
+ if (td->terminate)
+ return 1;
+
+ ptr = fio_memalign(cl_align, sizeof(*io_u));
+ if (!ptr) {
+ log_err("fio: unable to allocate aligned memory\n");
+ break;
+ }
+
+ io_u = ptr;
+ memset(io_u, 0, sizeof(*io_u));
+ INIT_FLIST_HEAD(&io_u->verify_list);
+ dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
+
+ io_u->index = i;
+ io_u->flags = IO_U_F_FREE;
+ io_u_qpush(&td->io_u_freelist, io_u);
+
+ /*
+ * io_u never leaves this stack, used for iteration of all
+ * io_u buffers.
+ */
+ io_u_qpush(&td->io_u_all, io_u);
+
+ if (td->io_ops->io_u_init) {
+ int ret = td->io_ops->io_u_init(td, io_u);
+
+ if (ret) {
+ log_err("fio: failed to init engine data: %d\n", ret);
+ return 1;
+ }
+ }
+ }
+
+	/*
+	 * init_io_u_buffers() returns non-zero when buffer allocation or
+	 * setup fails; propagate that instead of silently continuing with
+	 * io_us that have no backing buffers.
+	 */
+	if (init_io_u_buffers(td))
+		return 1;
+
+	if (init_file_completion_logging(td, max_units))
+		return 1;
+
+	return 0;
+}
+
+int init_io_u_buffers(struct thread_data *td)
+{
+ struct io_u *io_u;
+ unsigned long long max_bs, min_write;
+ int i, max_units;
+ int data_xfer = 1;
+ char *p;
+
+ max_units = td->o.iodepth;
+ max_bs = td_max_bs(td);
+ min_write = td->o.min_bs[DDIR_WRITE];
+ td->orig_buffer_size = (unsigned long long) max_bs
+ * (unsigned long long) max_units;
+
+ if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td)))
+ data_xfer = 0;
+
/*
* if we may later need to do address alignment, then add any
* possible adjustment here so that we don't cause a buffer
td->orig_buffer_size += page_mask + td->o.mem_align;
if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
- unsigned long bs;
+ unsigned long long bs;
bs = td->orig_buffer_size + td->o.hugepage_size - 1;
td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
else
p = td->orig_buffer;
- cl_align = os_cache_line_size();
-
for (i = 0; i < max_units; i++) {
- void *ptr;
-
- if (td->terminate)
- return 1;
-
- ptr = fio_memalign(cl_align, sizeof(*io_u));
- if (!ptr) {
- log_err("fio: unable to allocate aligned memory\n");
- break;
- }
-
- io_u = ptr;
- memset(io_u, 0, sizeof(*io_u));
- INIT_FLIST_HEAD(&io_u->verify_list);
+ io_u = td->io_u_all.io_us[i];
dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);
if (data_xfer) {
fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
}
}
-
- io_u->index = i;
- io_u->flags = IO_U_F_FREE;
- io_u_qpush(&td->io_u_freelist, io_u);
-
- /*
- * io_u never leaves this stack, used for iteration of all
- * io_u buffers.
- */
- io_u_qpush(&td->io_u_all, io_u);
-
- if (td->io_ops->io_u_init) {
- int ret = td->io_ops->io_u_init(td, io_u);
-
- if (ret) {
- log_err("fio: failed to init engine data: %d\n", ret);
- return 1;
- }
- }
-
p += max_bs;
}
- if (init_file_completion_logging(td, max_units))
- return 1;
-
return 0;
}
goto err;
}
+ td_zone_gen_index(td);
+
/*
* Do this early, we don't want the compress threads to be limited
* to the same CPUs as the IO workers. So do this before we set
close_and_free_files(td);
cleanup_io_u(td);
close_ioengine(td);
- cgroup_shutdown(td, &cgroup_mnt);
+ cgroup_shutdown(td, cgroup_mnt);
verify_free_state(td);
-
- if (td->zone_state_index) {
- int i;
-
- for (i = 0; i < DDIR_RWDIR_CNT; i++)
- free(td->zone_state_index[i]);
- free(td->zone_state_index);
- td->zone_state_index = NULL;
- }
+ td_zone_free_index(td);
if (fio_option_is_set(o, cpumask)) {
ret = fio_cpuset_exit(&o->cpumask);
*/
if (o->write_iolog_file)
write_iolog_close(td);
+ if (td->io_log_rfile)
+ fclose(td->io_log_rfile);
td_set_runstate(td, TD_EXITED);
}
if (output_format & FIO_OUTPUT_NORMAL) {
- log_info("Starting ");
+ struct buf_output out;
+
+ buf_output_init(&out);
+ __log_buf(&out, "Starting ");
if (nr_thread)
- log_info("%d thread%s", nr_thread,
+ __log_buf(&out, "%d thread%s", nr_thread,
nr_thread > 1 ? "s" : "");
if (nr_process) {
if (nr_thread)
- log_info(" and ");
- log_info("%d process%s", nr_process,
+ __log_buf(&out, " and ");
+ __log_buf(&out, "%d process%s", nr_process,
nr_process > 1 ? "es" : "");
}
- log_info("\n");
- log_info_flush();
+ __log_buf(&out, "\n");
+ log_info_buf(out.buf, out.buflen);
+ buf_output_free(&out);
}
todo = thread_number;
}
startup_sem = fio_sem_init(FIO_SEM_LOCKED);
+ if (!sk_out)
+ is_local_backend = true;
if (startup_sem == NULL)
return 1;
cgroup_kill(cgroup_list);
sfree(cgroup_list);
}
- sfree(cgroup_mnt);
fio_sem_remove(startup_sem);
stat_exit();