X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=backend.c;h=8fec1ce3141f4f5b3f910784c75f5f6a200c88d9;hp=8a2b2ab8c2ebb69a50bd8c44a3cb7c1bcbd7be8e;hb=4806b82473fea74c517e5e0c7665b0ca0542b3ec;hpb=98e7161cda6a1f5b0cbd8979c261db8eb3a4ab72 diff --git a/backend.c b/backend.c index 8a2b2ab8..8fec1ce3 100644 --- a/backend.c +++ b/backend.c @@ -47,6 +47,7 @@ #include "rate-submit.h" #include "helper_thread.h" #include "pshared.h" +#include "zone-dist.h" static struct fio_sem *startup_sem; static struct flist_head *cgroup_list; @@ -454,7 +455,7 @@ int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret, *ret = -io_u->error; clear_io_u(td, io_u); } else if (io_u->resid) { - int bytes = io_u->xfer_buflen - io_u->resid; + long long bytes = io_u->xfer_buflen - io_u->resid; struct fio_file *f = io_u->file; if (bytes_issued) @@ -583,7 +584,7 @@ static bool in_flight_overlap(struct io_u_queue *q, struct io_u *io_u) if (x1 < y2 && y1 < x2) { overlap = true; - dprint(FD_IO, "in-flight overlap: %llu/%lu, %llu/%lu\n", + dprint(FD_IO, "in-flight overlap: %llu/%llu, %llu/%llu\n", x1, io_u->buflen, y1, check_io_u->buflen); break; @@ -1035,7 +1036,7 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done) log_io_piece(td, io_u); if (td->o.io_submit_mode == IO_MODE_OFFLOAD) { - const unsigned long blen = io_u->xfer_buflen; + const unsigned long long blen = io_u->xfer_buflen; const enum fio_ddir __ddir = acct_ddir(io_u); if (td->error) @@ -1201,19 +1202,10 @@ static void cleanup_io_u(struct thread_data *td) static int init_io_u(struct thread_data *td) { struct io_u *io_u; - unsigned int max_bs, min_write; int cl_align, i, max_units; - int data_xfer = 1, err; - char *p; + int err; max_units = td->o.iodepth; - max_bs = td_max_bs(td); - min_write = td->o.min_bs[DDIR_WRITE]; - td->orig_buffer_size = (unsigned long long) max_bs - * (unsigned long long) max_units; - - if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td))) - data_xfer = 0; err = 0; err += !io_u_rinit(&td->io_u_requeues, td->o.iodepth); @@ -1225,6 +1217,70 @@ static int init_io_u(struct thread_data *td) return 1; } + cl_align = os_cache_line_size(); + + for (i = 0; i < max_units; i++) { + void *ptr; + + if (td->terminate) + return 1; + + ptr = fio_memalign(cl_align, sizeof(*io_u)); + if (!ptr) { + log_err("fio: unable to allocate aligned memory\n"); + break; + } + + io_u = ptr; + memset(io_u, 0, sizeof(*io_u)); + INIT_FLIST_HEAD(&io_u->verify_list); + dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i); + + io_u->index = i; + io_u->flags = IO_U_F_FREE; + io_u_qpush(&td->io_u_freelist, io_u); + + /* + * io_u never leaves this stack, used for iteration of all + * io_u buffers. 
+ */ + io_u_qpush(&td->io_u_all, io_u); + + if (td->io_ops->io_u_init) { + int ret = td->io_ops->io_u_init(td, io_u); + + if (ret) { + log_err("fio: failed to init engine data: %d\n", ret); + return 1; + } + } + } + + init_io_u_buffers(td); + + if (init_file_completion_logging(td, max_units)) + return 1; + + return 0; +} + +int init_io_u_buffers(struct thread_data *td) +{ + struct io_u *io_u; + unsigned long long max_bs, min_write; + int i, max_units; + int data_xfer = 1; + char *p; + + max_units = td->o.iodepth; + max_bs = td_max_bs(td); + min_write = td->o.min_bs[DDIR_WRITE]; + td->orig_buffer_size = (unsigned long long) max_bs + * (unsigned long long) max_units; + + if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td))) + data_xfer = 0; + /* * if we may later need to do address alignment, then add any * possible adjustment here so that we don't cause a buffer @@ -1236,7 +1292,7 @@ static int init_io_u(struct thread_data *td) td->orig_buffer_size += page_mask + td->o.mem_align; if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) { - unsigned long bs; + unsigned long long bs; bs = td->orig_buffer_size + td->o.hugepage_size - 1; td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1); @@ -1256,23 +1312,8 @@ static int init_io_u(struct thread_data *td) else p = td->orig_buffer; - cl_align = os_cache_line_size(); - for (i = 0; i < max_units; i++) { - void *ptr; - - if (td->terminate) - return 1; - - ptr = fio_memalign(cl_align, sizeof(*io_u)); - if (!ptr) { - log_err("fio: unable to allocate aligned memory\n"); - break; - } - - io_u = ptr; - memset(io_u, 0, sizeof(*io_u)); - INIT_FLIST_HEAD(&io_u->verify_list); + io_u = td->io_u_all.io_us[i]; dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i); if (data_xfer) { @@ -1289,32 +1330,9 @@ static int init_io_u(struct thread_data *td) fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0); } } - - io_u->index = i; - io_u->flags = IO_U_F_FREE; - io_u_qpush(&td->io_u_freelist, io_u); - - /* - * io_u never leaves this stack, used for iteration of all - * io_u buffers. - */ - io_u_qpush(&td->io_u_all, io_u); - - if (td->io_ops->io_u_init) { - int ret = td->io_ops->io_u_init(td, io_u); - - if (ret) { - log_err("fio: failed to init engine data: %d\n", ret); - return 1; - } - } - p += max_bs; } - if (init_file_completion_logging(td, max_units)) - return 1; - return 0; } @@ -1575,6 +1593,8 @@ static void *thread_main(void *data) goto err; } + td_zone_gen_index(td); + /* * Do this early, we don't want the compress threads to be limited * to the same CPUs as the IO workers. So do this before we set @@ -1890,15 +1910,7 @@ err: close_ioengine(td); cgroup_shutdown(td, cgroup_mnt); verify_free_state(td); - - if (td->zone_state_index) { - int i; - - for (i = 0; i < DDIR_RWDIR_CNT; i++) - free(td->zone_state_index[i]); - free(td->zone_state_index); - td->zone_state_index = NULL; - } + td_zone_free_index(td); if (fio_option_is_set(o, cpumask)) { ret = fio_cpuset_exit(&o->cpumask); @@ -2469,6 +2481,8 @@ int fio_backend(struct sk_out *sk_out) } startup_sem = fio_sem_init(FIO_SEM_LOCKED); + if (!sk_out) + is_local_backend = true; if (startup_sem == NULL) return 1;
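
Note on the type changes above: io_u->xfer_buflen and io_u->buflen are 64-bit quantities, so the narrow intermediates being replaced (int bytes in io_queue_event()'s resid accounting, unsigned long blen and bs, and the %lu specifiers in dprint(), which are wrong for a 64-bit value wherever long is 32 bits) silently truncate once a transfer reaches 2 GiB. The following is a standalone illustration of the failure mode the long long widening avoids; it is demo code with made-up values, not fio code:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical values: a 3 GiB request with 1 GiB residual. */
		unsigned long long xfer_buflen = 3ULL << 30;
		unsigned long long resid = 1ULL << 30;

		/* Old declaration: 2 GiB does not fit in a 32-bit int, so the
		 * completed-byte count typically wraps to a negative value. */
		int old_bytes = xfer_buflen - resid;

		/* Widened declaration, as in the patch: the count is preserved. */
		long long new_bytes = xfer_buflen - resid;

		printf("int: %d, long long: %lld\n", old_bytes, new_bytes);
		return 0;
	}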
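Note on the zone-dist changes: the new zone-dist.h include, the td_zone_gen_index() call in thread_main(), and the deletion of the open-coded zone_state_index loop in the error path move zone-distribution state handling out of backend.c. The helper bodies are not part of this diff; going by the loop it deletes from thread_main(), td_zone_free_index() presumably looks close to the sketch below. DDIR_RWDIR_CNT and the zone_state_index field are existing fio names; the body is inferred from the removed lines, not quoted from zone-dist.c:

	#include <stdlib.h>
	#include "fio.h"	/* struct thread_data, DDIR_RWDIR_CNT */
	#include "zone-dist.h"

	void td_zone_free_index(struct thread_data *td)
	{
		int i;

		/* Nothing was generated for this thread. */
		if (!td->zone_state_index)
			return;

		/* One zone-state table per data direction (read/write/trim). */
		for (i = 0; i < DDIR_RWDIR_CNT; i++)
			free(td->zone_state_index[i]);

		free(td->zone_state_index);
		td->zone_state_index = NULL;
	}

The matching td_zone_gen_index() call added near the top of thread_main() presumably builds those per-direction tables, so allocation and teardown now live side by side in zone-dist.c instead of being open-coded in backend.c.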