X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=backend.c;h=ce0a0098299294279ea4bf16dfd88b7079d9466e;hp=6e0e4424d6f6ce31d32ff6cf5d2ca77ba09111c2;hb=f3afa57e36550288340f1b6c694f354ae72654b9;hpb=649c10c59b016ae8586e54746d3761bc6df33c9b

diff --git a/backend.c b/backend.c
index 6e0e4424..ce0a0098 100644
--- a/backend.c
+++ b/backend.c
@@ -30,6 +30,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -49,6 +50,7 @@
 #include "server.h"
 
 static pthread_t disk_util_thread;
+static struct fio_mutex *disk_thread_mutex;
 static struct fio_mutex *startup_mutex;
 static struct fio_mutex *writeout_mutex;
 static struct flist_head *cgroup_list;
@@ -56,7 +58,7 @@ static char *cgroup_mnt;
 static int exit_value;
 static volatile int fio_abort;
 
-struct io_log *agg_io_log[2];
+struct io_log *agg_io_log[DDIR_RWDIR_CNT];
 
 int groupid = 0;
 unsigned int thread_number = 0;
@@ -67,7 +69,7 @@ int temp_stall_ts;
 unsigned long done_secs = 0;
 
 #define PAGE_ALIGN(buf)	\
-	(char *) (((unsigned long) (buf) + page_mask) & ~page_mask)
+	(char *) (((uintptr_t) (buf) + page_mask) & ~page_mask)
 
 #define JOB_START_TIMEOUT	(5 * 1000)
 
@@ -86,6 +88,11 @@ static void sig_int(int sig)
 	}
 }
 
+static void sig_show_status(int sig)
+{
+	show_running_run_stats();
+}
+
 static void set_sig_handlers(void)
 {
 	struct sigaction act;
@@ -100,6 +107,11 @@ static void set_sig_handlers(void)
 	act.sa_flags = SA_RESTART;
 	sigaction(SIGTERM, &act, NULL);
 
+	memset(&act, 0, sizeof(act));
+	act.sa_handler = sig_show_status;
+	act.sa_flags = SA_RESTART;
+	sigaction(SIGUSR1, &act, NULL);
+
 	if (is_backend) {
 		memset(&act, 0, sizeof(act));
 		act.sa_handler = sig_int;
@@ -196,10 +208,12 @@ static int check_min_rate(struct thread_data *td, struct timeval *now,
 {
 	int ret = 0;
 
-	if (bytes_done[0])
-		ret |= __check_min_rate(td, now, 0);
-	if (bytes_done[1])
-		ret |= __check_min_rate(td, now, 1);
+	if (bytes_done[DDIR_READ])
+		ret |= __check_min_rate(td, now, DDIR_READ);
+	if (bytes_done[DDIR_WRITE])
+		ret |= __check_min_rate(td, now, DDIR_WRITE);
+	if (bytes_done[DDIR_TRIM])
+		ret |= __check_min_rate(td, now, DDIR_TRIM);
 
 	return ret;
 }
@@ -408,6 +422,9 @@ static void do_verify(struct thread_data *td)
 			}
 		}
 
+		if (flow_threshold_exceeded(td))
+			continue;
+
 		io_u = __get_io_u(td);
 		if (!io_u)
 			break;
@@ -490,7 +507,10 @@ sync_done:
 		if (full || !td->o.iodepth_batch_complete) {
 			min_events = min(td->o.iodepth_batch_complete,
 					 td->cur_depth);
-			if (full && !min_events && td->o.iodepth_batch_complete != 0)
+			/*
+			 * if the queue is full, we MUST reap at least 1 event
+			 */
+			if (full && !min_events)
 				min_events = 1;
 
 			do {
@@ -522,6 +542,22 @@ sync_done:
 	dprint(FD_VERIFY, "exiting loop\n");
 }
 
+static int io_bytes_exceeded(struct thread_data *td)
+{
+	unsigned long long bytes;
+
+	if (td_rw(td))
+		bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
+	else if (td_write(td))
+		bytes = td->this_io_bytes[DDIR_WRITE];
+	else if (td_read(td))
+		bytes = td->this_io_bytes[DDIR_READ];
+	else
+		bytes = td->this_io_bytes[DDIR_TRIM];
+
+	return bytes >= td->o.size;
+}
+
 /*
  * Main IO worker function. It retrieves io_u's to process and queues
  * and reaps them, checking for rate and errors along the way.
@@ -536,11 +572,11 @@ static void do_io(struct thread_data *td)
 	else
 		td_set_runstate(td, TD_RUNNING);
 
-	while ( (td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
-		(!flist_empty(&td->trim_list)) ||
-		((td->this_io_bytes[0] + td->this_io_bytes[1]) < td->o.size) ) {
+	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
+		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
+		td->o.time_based) {
 		struct timeval comp_time;
-		unsigned long bytes_done[2] = { 0, 0 };
+		unsigned long bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
 		int min_evts = 0;
 		struct io_u *io_u;
 		int ret2, full;
@@ -559,6 +595,9 @@ static void do_io(struct thread_data *td)
 			}
 		}
 
+		if (flow_threshold_exceeded(td))
+			continue;
+
 		io_u = get_io_u(td);
 		if (!io_u)
 			break;
@@ -566,11 +605,12 @@ static void do_io(struct thread_data *td)
 		ddir = io_u->ddir;
 
 		/*
-		 * Add verification end_io handler, if asked to verify
-		 * a previously written file.
+		 * Add verification end_io handler if:
+		 *	- Asked to verify (!td_rw(td))
+		 *	- Or the io_u is from our verify list (mixed write/ver)
 		 */
 		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
-		    !td_rw(td)) {
+		    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {
 			if (td->o.verify_async)
 				io_u->end_io = verify_io_u_async;
 			else
@@ -613,8 +653,9 @@ static void do_io(struct thread_data *td)
 				requeue_io_u(td, &io_u);
 			} else {
 sync_done:
-				if (__should_check_rate(td, 0) ||
-				    __should_check_rate(td, 1))
+				if (__should_check_rate(td, DDIR_READ) ||
+				    __should_check_rate(td, DDIR_WRITE) ||
+				    __should_check_rate(td, DDIR_TRIM))
 					fio_gettime(&comp_time, NULL);
 
 				ret = io_u_sync_complete(td, io_u, bytes_done);
@@ -655,11 +696,15 @@ sync_done:
 		if (full || !td->o.iodepth_batch_complete) {
 			min_evts = min(td->o.iodepth_batch_complete,
 					td->cur_depth);
-			if (full && !min_evts && td->o.iodepth_batch_complete != 0)
+			/*
+			 * if the queue is full, we MUST reap at least 1 event
+			 */
+			if (full && !min_evts)
 				min_evts = 1;
 
-			if (__should_check_rate(td, 0) ||
-			    __should_check_rate(td, 1))
+			if (__should_check_rate(td, DDIR_READ) ||
+			    __should_check_rate(td, DDIR_WRITE) ||
+			    __should_check_rate(td, DDIR_TRIM))
 				fio_gettime(&comp_time, NULL);
 
 			do {
@@ -672,7 +717,7 @@ sync_done:
 		if (ret < 0)
 			break;
 
-		if (!(bytes_done[0] + bytes_done[1]))
+		if (!ddir_rw_sum(bytes_done))
 			continue;
 
 		if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
@@ -687,7 +732,7 @@ sync_done:
 		if (td->o.thinktime) {
 			unsigned long long b;
 
-			b = td->io_blocks[0] + td->io_blocks[1];
+			b = ddir_rw_sum(td->io_blocks);
 			if (!(b % td->o.thinktime_blocks)) {
 				int left;
 
@@ -733,7 +778,7 @@ sync_done:
 	/*
 	 * stop job if we failed doing any IO
 	 */
-	if ((td->this_io_bytes[0] + td->this_io_bytes[1]) == 0)
+	if (!ddir_rw_sum(td->this_io_bytes))
 		td->done = 1;
 }
 
@@ -755,12 +800,14 @@ static void cleanup_io_u(struct thread_data *td)
 static int init_io_u(struct thread_data *td)
 {
 	struct io_u *io_u;
-	unsigned int max_bs;
+	unsigned int max_bs, min_write;
 	int cl_align, i, max_units;
 	char *p;
 
 	max_units = td->o.iodepth;
 	max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]);
+	max_bs = max(td->o.max_bs[DDIR_TRIM], max_bs);
+	min_write = td->o.min_bs[DDIR_WRITE];
 	td->orig_buffer_size = (unsigned long long) max_bs
 					* (unsigned long long) max_units;
 
@@ -809,7 +856,7 @@ static int init_io_u(struct thread_data *td)
 		dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);
 
 		if (td_write(td))
-			io_u_fill_buffer(td, io_u, max_bs);
+			io_u_fill_buffer(td, io_u, min_write, max_bs);
 		if (td_write(td) && td->o.verify_pattern_bytes) {
 			/*
 			 * Fill the buffer with the pattern if we are
@@ -886,8 +933,6 @@ static int switch_ioscheduler(struct thread_data *td)
 
 static int keep_running(struct thread_data *td)
 {
-	unsigned long long io_done;
-
 	if (td->done)
 		return 0;
 	if (td->o.time_based)
@@ -897,9 +942,7 @@ static int keep_running(struct thread_data *td)
 		return 1;
 	}
 
-	io_done = td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE]
-			+ td->io_skip_bytes;
-	if (io_done < td->o.size)
+	if (ddir_rw_sum(td->io_bytes) < td->o.size)
 		return 1;
 
 	return 0;
@@ -1016,7 +1059,7 @@ static void *thread_main(void *data)
 		}
 	}
 
-	if (td->o.cgroup_weight && cgroup_setup(td, cgroup_list, &cgroup_mnt))
+	if (td->o.cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
 		goto err;
 
 	errno = 0;
@@ -1057,10 +1100,13 @@ static void *thread_main(void *data)
 	memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
 	memcpy(&td->tv_cache, &td->start, sizeof(td->start));
 
-	if (td->o.ratemin[0] || td->o.ratemin[1]) {
-		memcpy(&td->lastrate[0], &td->bw_sample_time,
+	if (td->o.ratemin[DDIR_READ] || td->o.ratemin[DDIR_WRITE] ||
+	    td->o.ratemin[DDIR_TRIM]) {
+		memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
 					sizeof(td->bw_sample_time));
-		memcpy(&td->lastrate[1], &td->bw_sample_time,
+		memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
+					sizeof(td->bw_sample_time));
+		memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
 					sizeof(td->bw_sample_time));
 	}
 
@@ -1081,6 +1127,10 @@ static void *thread_main(void *data)
 			elapsed = utime_since_now(&td->start);
 			td->ts.runtime[DDIR_WRITE] += elapsed;
 		}
+		if (td_trim(td) && td->io_bytes[DDIR_TRIM]) {
+			elapsed = utime_since_now(&td->start);
+			td->ts.runtime[DDIR_TRIM] += elapsed;
+		}
 
 		if (td->error || td->terminate)
 			break;
@@ -1103,11 +1153,13 @@ static void *thread_main(void *data)
 	}
 
 	update_rusage_stat(td);
-	td->ts.runtime[0] = (td->ts.runtime[0] + 999) / 1000;
-	td->ts.runtime[1] = (td->ts.runtime[1] + 999) / 1000;
+	td->ts.runtime[DDIR_READ] = (td->ts.runtime[DDIR_READ] + 999) / 1000;
+	td->ts.runtime[DDIR_WRITE] = (td->ts.runtime[DDIR_WRITE] + 999) / 1000;
+	td->ts.runtime[DDIR_TRIM] = (td->ts.runtime[DDIR_TRIM] + 999) / 1000;
 	td->ts.total_run_time = mtime_since_now(&td->epoch);
-	td->ts.io_bytes[0] = td->io_bytes[0];
-	td->ts.io_bytes[1] = td->io_bytes[1];
+	td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
+	td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
+	td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
 
 	fio_mutex_down(writeout_mutex);
 	if (td->bw_log) {
@@ -1179,7 +1231,7 @@ err:
 		write_iolog_close(td);
 
 	td_set_runstate(td, TD_EXITED);
-	return (void *) (unsigned long) td->error;
+	return (void *) (uintptr_t) td->error;
 }
 
 
@@ -1210,7 +1262,7 @@ static int fork_main(int shmid, int offset)
 	td = data + offset * sizeof(struct thread_data);
 	ret = thread_main(td);
 	shmdt(data);
-	return (int) (unsigned long) ret;
+	return (int) (uintptr_t) ret;
 }
 
 /*
@@ -1265,6 +1317,7 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
 			if (errno == ECHILD) {
 				log_err("fio: pid=%d disappeared %d\n",
 						(int) td->pid, td->runstate);
+				td->sig = ECHILD;
 				td_set_runstate(td, TD_REAPED);
 				goto reaped;
 			}
@@ -1276,6 +1329,7 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
 				if (sig != SIGTERM)
 					log_err("fio: pid=%d, got signal=%d\n",
 							(int) td->pid, sig);
+				td->sig = sig;
 				td_set_runstate(td, TD_REAPED);
 				goto reaped;
 			}
@@ -1295,8 +1349,8 @@ static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
 			continue;
 reaped:
 		(*nr_running)--;
-		(*m_rate) -= (td->o.ratemin[0] + td->o.ratemin[1]);
-		(*t_rate) -= (td->o.rate[0] + td->o.rate[1]);
+		(*m_rate) -= ddir_rw_sum(td->o.ratemin);
+		(*t_rate) -= ddir_rw_sum(td->o.rate);
 		if (!td->pid)
 			pending--;
 
@@ -1327,7 +1381,7 @@ static void run_threads(void)
 
 	set_sig_handlers();
 
-	if (!terse_output) {
+	if (output_format == FIO_OUTPUT_NORMAL) {
 		log_info("Starting ");
 		if (nr_thread)
 			log_info("%d thread%s", nr_thread,
@@ -1518,8 +1572,8 @@ static void run_threads(void)
 			td_set_runstate(td, TD_RUNNING);
 			nr_running++;
 			nr_started--;
-			m_rate += td->o.ratemin[0] + td->o.ratemin[1];
-			t_rate += td->o.rate[0] + td->o.rate[1];
+			m_rate += ddir_rw_sum(td->o.ratemin);
+			t_rate += ddir_rw_sum(td->o.rate);
 			todo--;
 			fio_mutex_up(td->mutex);
 		}
@@ -1547,20 +1601,28 @@ static void run_threads(void)
 	fio_unpin_memory();
 }
 
+void wait_for_disk_thread_exit(void)
+{
+	fio_mutex_down(disk_thread_mutex);
+}
+
 static void *disk_thread_main(void *data)
 {
+	int ret = 0;
+
 	fio_mutex_up(startup_mutex);
 
-	while (threads) {
+	while (threads && !ret) {
 		usleep(DISK_UTIL_MSEC * 1000);
 		if (!threads)
 			break;
-		update_io_ticks();
+		ret = update_io_ticks();
 
 		if (!is_backend)
 			print_thread_status();
 	}
 
+	fio_mutex_up(disk_thread_mutex);
 	return NULL;
 }
 
@@ -1568,14 +1630,20 @@ static int create_disk_util_thread(void)
 {
 	int ret;
 
+	setup_disk_util();
+
+	disk_thread_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
+
 	ret = pthread_create(&disk_util_thread, NULL, disk_thread_main, NULL);
 	if (ret) {
+		fio_mutex_remove(disk_thread_mutex);
 		log_err("Can't create disk util thread: %s\n", strerror(ret));
 		return 1;
 	}
 
 	ret = pthread_detach(disk_util_thread);
 	if (ret) {
+		fio_mutex_remove(disk_thread_mutex);
 		log_err("Can't detatch disk util thread: %s\n", strerror(ret));
 		return 1;
 	}
@@ -1603,12 +1671,13 @@ int fio_backend(void)
 	if (write_bw_log) {
 		setup_log(&agg_io_log[DDIR_READ], 0);
 		setup_log(&agg_io_log[DDIR_WRITE], 0);
+		setup_log(&agg_io_log[DDIR_TRIM], 0);
 	}
 
-	startup_mutex = fio_mutex_init(0);
+	startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
 	if (startup_mutex == NULL)
 		return 1;
-	writeout_mutex = fio_mutex_init(1);
+	writeout_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
 	if (writeout_mutex == NULL)
 		return 1;
 
@@ -1626,17 +1695,21 @@ int fio_backend(void)
 			__finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log");
 			__finish_log(agg_io_log[DDIR_WRITE], "agg-write_bw.log");
+			__finish_log(agg_io_log[DDIR_TRIM],
+					"agg-write_bw.log");
 		}
 	}
 
 	for_each_td(td, i)
 		fio_options_free(td);
 
+	free_disk_util();
 	cgroup_kill(cgroup_list);
 	sfree(cgroup_list);
 	sfree(cgroup_mnt);
 
 	fio_mutex_remove(startup_mutex);
 	fio_mutex_remove(writeout_mutex);
+	fio_mutex_remove(disk_thread_mutex);
 
 	return exit_value;
 }
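
The hunks above replace fio's literal 0/1 data-direction indices with the DDIR_READ/DDIR_WRITE/DDIR_TRIM enum (arrays sized by DDIR_RWDIR_CNT, totals taken with ddir_rw_sum()), switch the pointer math in PAGE_ALIGN() from unsigned long to uintptr_t, and hook SIGUSR1 up to show_running_run_stats(). The stand-alone C sketch below only illustrates the first two idioms; the enum values, the page_mask constant, and the rwdir_sum() helper are stand-ins picked for the example, not fio's own definitions.

/*
 * Illustrative sketch, not part of the patch: it mimics the uintptr_t
 * page-alignment idiom and the per-direction byte accounting the diff
 * introduces. DDIR_*, page_mask, and rwdir_sum() below are stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum { DDIR_READ = 0, DDIR_WRITE, DDIR_TRIM, DDIR_RWDIR_CNT };

static const uintptr_t page_mask = 4095;	/* assume 4 KiB pages */

/*
 * uintptr_t is guaranteed to round-trip a pointer value; unsigned long is
 * not on every ABI (e.g. 64-bit Windows, where long is 32 bits) -- which is
 * the point of the cast change in the patch.
 */
#define PAGE_ALIGN(buf)	\
	((char *) (((uintptr_t) (buf) + page_mask) & ~page_mask))

/* sum the read/write/trim counters, in the spirit of ddir_rw_sum() */
static unsigned long long rwdir_sum(const unsigned long long v[DDIR_RWDIR_CNT])
{
	unsigned long long sum = 0;
	int ddir;

	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
		sum += v[ddir];

	return sum;
}

int main(void)
{
	unsigned long long bytes_done[DDIR_RWDIR_CNT] = { 4096, 8192, 512 };
	char *raw = malloc(2 * 4096);
	char *aligned;

	if (!raw)
		return 1;

	aligned = PAGE_ALIGN(raw);
	printf("raw=%p aligned=%p total bytes=%llu\n",
	       (void *) raw, (void *) aligned, rwdir_sum(bytes_done));
	free(raw);
	return 0;
}

As a usage note, once the SIGUSR1 handler added by this patch is installed, sending kill -USR1 to a running fio process ends up in show_running_run_stats(), so a job can be asked for an interim stats dump without being stopped.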