engines/libblkio: Add option libblkio_wait_mode
diff --git a/backend.c b/backend.c
index 001b2b96477465f683fa6fbafc83231799de515c..928e524a370c21abb0a6cf1a232e29525a55185b 100644
--- a/backend.c
+++ b/backend.c
@@ -90,6 +90,25 @@ static void sig_int(int sig)
        }
 }
 
+#ifdef WIN32
+static void sig_break(int sig)
+{
+       struct thread_data *td;
+       int i;
+
+       sig_int(sig);
+
+       /**
+        * Windows terminates all job processes on SIGBREAK after the handler
+        * returns, so give them time to wrap up and report stats
+        */
+       for_each_td(td, i) {
+               while (td->runstate < TD_EXITED)
+                       sleep(1);
+       }
+}
+#endif
+
 void sig_show_status(int sig)
 {
        show_running_run_stats();
@@ -112,7 +131,7 @@ static void set_sig_handlers(void)
 /* Windows uses SIGBREAK as a quit signal from other applications */
 #ifdef WIN32
        memset(&act, 0, sizeof(act));
-       act.sa_handler = sig_int;
+       act.sa_handler = sig_break;
        act.sa_flags = SA_RESTART;
        sigaction(SIGBREAK, &act, NULL);
 #endif
@@ -663,7 +682,7 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
                                break;
                        }
                } else {
-                       if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
+                       if (td->bytes_verified + td->o.rw_min_bs > verify_bytes)
                                break;
 
                        while ((io_u = get_io_u(td)) != NULL) {
@@ -692,6 +711,8 @@ static void do_verify(struct thread_data *td, uint64_t verify_bytes)
                                        break;
                                } else if (io_u->ddir == DDIR_WRITE) {
                                        io_u->ddir = DDIR_READ;
+                                       io_u->numberio = td->verify_read_issues;
+                                       td->verify_read_issues++;
                                        populate_verify_io_u(td, io_u);
                                        break;
                                } else {
@@ -952,9 +973,11 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
                total_bytes += td->o.size;
 
        /* In trimwrite mode, each byte is trimmed and then written, so
-        * allow total_bytes to be twice as big */
-       if (td_trimwrite(td))
+        * allow total_bytes or number of ios to be twice as big */
+       if (td_trimwrite(td)) {
                total_bytes += td->total_io_size;
+               td->o.number_ios *= 2;
+       }
 
        while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
                (!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
@@ -1009,8 +1032,10 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
                        break;
                }
 
-               if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY)
+               if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY) {
+                       io_u->numberio = td->io_issues[io_u->ddir];
                        populate_verify_io_u(td, io_u);
+               }
 
                ddir = io_u->ddir;
 
@@ -1276,7 +1301,8 @@ static int init_io_u(struct thread_data *td)
                }
        }
 
-       init_io_u_buffers(td);
+       if (init_io_u_buffers(td))
+               return 1;
 
        if (init_file_completion_logging(td, max_units))
                return 1;
@@ -1770,6 +1796,11 @@ static void *thread_main(void *data)
        if (td_io_init(td))
                goto err;
 
+       if (td_ioengine_flagged(td, FIO_SYNCIO) && td->o.iodepth > 1) {
+               log_info("note: both iodepth > 1 and a synchronous I/O engine "
+                        "are selected, queue depth will be capped at 1\n");
+       }
+
        if (init_io_u(td))
                goto err;
 
@@ -2021,7 +2052,7 @@ static void reap_threads(unsigned int *nr_running, uint64_t *t_rate,
        for_each_td(td, i) {
                int flags = 0;
 
-                if (!strcmp(td->o.ioengine, "cpuio"))
+               if (!strcmp(td->o.ioengine, "cpuio"))
                        cputhreads++;
                else
                        realthreads++;
@@ -2432,9 +2463,9 @@ reap:
                                                        strerror(ret));
                        } else {
                                pid_t pid;
-                               struct fio_file **files;
+                               void *eo;
                                dprint(FD_PROCESS, "will fork\n");
-                               files = td->files;
+                               eo = td->eo;
                                read_barrier();
                                pid = fork();
                                if (!pid) {
@@ -2444,9 +2475,7 @@ reap:
                                        _exit(ret);
                                } else if (i == fio_debug_jobno)
                                        *fio_debug_jobp = pid;
-                               // freeing previously allocated memory for files
-                               // this memory freed MUST NOT be shared between processes, only the pointer itself may be shared within TD
-                               free(files);
+                               free(eo);
                                free(fd);
                                fd = NULL;
                        }
@@ -2567,6 +2596,11 @@ int fio_backend(struct sk_out *sk_out)
                setup_log(&agg_io_log[DDIR_TRIM], &p, "agg-trim_bw.log");
        }
 
+       if (init_global_dedupe_working_set_seeds()) {
+               log_err("fio: failed to initialize global dedupe working set\n");
+               return 1;
+       }
+
        startup_sem = fio_sem_init(FIO_SEM_LOCKED);
        if (!sk_out)
                is_local_backend = true;