From 0a852a50c315776de0b43185e30576f15a420a24 Mon Sep 17 00:00:00 2001
From: Damien Le Moal
Date: Thu, 18 Nov 2021 14:27:29 +0900
Subject: [PATCH] fio: Introduce the log_entries option

When iops, latency, or bw logging options are used, fio will by default
log information for any I/O that completes. The initial number of I/O
log entries is 1024, as defined by DEF_LOG_ENTRIES. When all log
entries are used, new log entries are dynamically allocated by
get_new_log(). This dynamic log entry allocation can negatively impact
time-related statistics such as I/O tail latencies (e.g. the 99.9th
percentile completion latency), as growing the logs causes a temporary
I/O stall (I/O quiesce) which disturbs the workload steady state. The
effect is especially noticeable with workloads using I/O priorities:
the tail latencies of high priority I/Os increase if the I/O log needs
to be grown.

For example, running the following fio command on a SATA disk
supporting NCQ priority:

fio --name=prio-randread --filename=/dev/sdg \
    --random_generator=tausworthe64 --ioscheduler=none \
    --write_lat_log=randread.log --log_prio=1 --rw=randread --bs=128k \
    --ioengine=libaio --iodepth=32 --direct=1 --cmdprio_class=1 \
    --cmdprio_percentage=30 --runtime=900

(a 128KB random read workload at QD=32, with 30% of commands issued
with a high priority), with an initial number of log entries equal to
the default of 1024 and depending on the machine memory state, the
completion latency statistics may show imprecise percentiles such as
the ones below.

high prio (30.75%) clat percentiles (msec):
 |  1.00th=[   14],  5.00th=[   17], 10.00th=[   20], 20.00th=[   23],
 | 30.00th=[   27], 40.00th=[   32], 50.00th=[   40], 60.00th=[   53],
 | 70.00th=[   71], 80.00th=[  104], 90.00th=[  169], 95.00th=[  243],
 | 99.00th=[  514], 99.50th=[  676], 99.90th=[ 1485], 99.95th=[ 1502],
 | 99.99th=[ 1552]
low prio (69.25%) clat percentiles (msec):
 |  1.00th=[   16],  5.00th=[   24], 10.00th=[   37], 20.00th=[   68],
 | 30.00th=[  105], 40.00th=[  146], 50.00th=[  199], 60.00th=[  255],
 | 70.00th=[  330], 80.00th=[  439], 90.00th=[  592], 95.00th=[  718],
 | 99.00th=[  885], 99.50th=[  986], 99.90th=[ 1469], 99.95th=[ 1536],
 | 99.99th=[ 1586]

All completion latency percentiles above the 99.90th percentile are
similar for the high and low priority commands, which is not consistent
with the drive's expected execution of the prioritized read commands.

To solve this issue and get more precise latency statistics, this patch
introduces the new "log_entries" option to allow specifying a larger
initial number of I/O log entries, avoiding run-time allocation. This
option value defaults to DEF_LOG_ENTRIES and its maximum value is
MAX_LOG_ENTRIES, to be consistent with get_new_log() allocations.

Also simplify get_new_log() by using calloc() instead of malloc(),
removing the need for the local variable new_size.
Adding the "--log_entries=65536" option to the previous command line
example, the completion latency results obtained are more stable:

high prio (30.72%) clat percentiles (msec):
 |  1.00th=[   15],  5.00th=[   17], 10.00th=[   19], 20.00th=[   22],
 | 30.00th=[   24], 40.00th=[   27], 50.00th=[   32], 60.00th=[   36],
 | 70.00th=[   46], 80.00th=[   57], 90.00th=[   81], 95.00th=[  105],
 | 99.00th=[  161], 99.50th=[  188], 99.90th=[  271], 99.95th=[  275],
 | 99.99th=[  363]
low prio (69.28%) clat percentiles (msec):
 |  1.00th=[   16],  5.00th=[   27], 10.00th=[   43], 20.00th=[   80],
 | 30.00th=[  123], 40.00th=[  176], 50.00th=[  236], 60.00th=[  313],
 | 70.00th=[  401], 80.00th=[  506], 90.00th=[  634], 95.00th=[  718],
 | 99.00th=[  844], 99.50th=[  885], 99.90th=[  953], 99.95th=[  995],
 | 99.99th=[ 1053]

All completion percentiles now clearly show shorter latencies for the
high priority commands, as expected. The 99.99th percentile for low
priority commands is also improved compared to the previous case, as
the measurements are no longer impacted by the dynamic allocation of
log entries.

Suggested-by: Shin'ichiro Kawasaki
Signed-off-by: Damien Le Moal
Reviewed-by: Niklas Cassel
Link: https://lore.kernel.org/r/20211118052729.132423-1-damien.lemoal@opensource.wdc.com
Signed-off-by: Jens Axboe
---
 HOWTO            | 12 ++++++++++++
 cconv.c          |  2 ++
 fio.1            | 11 +++++++++++
 options.c        | 12 ++++++++++++
 server.h         |  2 +-
 stat.c           | 12 +++++-------
 thread_options.h |  2 ++
 7 files changed, 45 insertions(+), 8 deletions(-)

diff --git a/HOWTO b/HOWTO
index 196bca6c..a3b3acfe 100644
--- a/HOWTO
+++ b/HOWTO
@@ -3537,6 +3537,18 @@ Measurements and reporting
 	:option:`write_bw_log` for details about the filename format and `Log
 	File Formats`_ for how data is structured within the file.
 
+.. option:: log_entries=int
+
+	By default, fio will log an entry in the iops, latency, or bw log for
+	every I/O that completes. The initial number of I/O log entries is 1024.
+	When the log entries are all used, new log entries are dynamically
+	allocated. This dynamic log entry allocation may negatively impact
+	time-related statistics such as I/O tail latencies (e.g. 99.9th percentile
+	completion latency). This option allows specifying a larger initial
+	number of log entries to avoid run-time allocations of new log entries,
+	resulting in more precise time-related I/O statistics.
+	Also see :option:`log_avg_msec`. Defaults to 1024.
+
 .. option:: log_avg_msec=int
 
 	By default, fio will log an entry in the iops, latency, or bw log for every
diff --git a/cconv.c b/cconv.c
index 2104308c..4f8d27eb 100644
--- a/cconv.c
+++ b/cconv.c
@@ -187,6 +187,7 @@ void convert_thread_options_to_cpu(struct thread_options *o,
 	o->rand_repeatable = le32_to_cpu(top->rand_repeatable);
 	o->allrand_repeatable = le32_to_cpu(top->allrand_repeatable);
 	o->rand_seed = le64_to_cpu(top->rand_seed);
+	o->log_entries = le32_to_cpu(top->log_entries);
 	o->log_avg_msec = le32_to_cpu(top->log_avg_msec);
 	o->log_hist_msec = le32_to_cpu(top->log_hist_msec);
 	o->log_hist_coarseness = le32_to_cpu(top->log_hist_coarseness);
@@ -416,6 +417,7 @@ void convert_thread_options_to_net(struct thread_options_pack *top,
 	top->rand_repeatable = cpu_to_le32(o->rand_repeatable);
 	top->allrand_repeatable = cpu_to_le32(o->allrand_repeatable);
 	top->rand_seed = __cpu_to_le64(o->rand_seed);
+	top->log_entries = cpu_to_le32(o->log_entries);
 	top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
 	top->log_max = cpu_to_le32(o->log_max);
 	top->log_offset = cpu_to_le32(o->log_offset);
diff --git a/fio.1 b/fio.1
index e3c3feae..a6469541 100644
--- a/fio.1
+++ b/fio.1
@@ -3243,6 +3243,17 @@ logging (see \fBlog_avg_msec\fR) has been enabled. See \fBwrite_bw_log\fR for
 details about the filename format and \fBLOG FILE FORMATS\fR for how data is
 structured within the file.
 .TP
+.BI log_entries \fR=\fPint
+By default, fio will log an entry in the iops, latency, or bw log for
+every I/O that completes. The initial number of I/O log entries is 1024.
+When the log entries are all used, new log entries are dynamically
+allocated. This dynamic log entry allocation may negatively impact
+time-related statistics such as I/O tail latencies (e.g. 99.9th percentile
+completion latency). This option allows specifying a larger initial
+number of log entries to avoid run-time allocation of new log entries,
+resulting in more precise time-related I/O statistics.
+Also see \fBlog_avg_msec\fR. Defaults to 1024.
+.TP
 .BI log_avg_msec \fR=\fPint
 By default, fio will log an entry in the iops, latency, or bw log for every
 I/O that completes. When writing to the disk log, that can quickly grow to a
diff --git a/options.c b/options.c
index 460cf4ff..102bcf56 100644
--- a/options.c
+++ b/options.c
@@ -4244,6 +4244,18 @@ struct fio_option fio_options[FIO_MAX_OPTS] = {
 		.category = FIO_OPT_C_LOG,
 		.group	= FIO_OPT_G_INVALID,
 	},
+	{
+		.name	= "log_entries",
+		.lname	= "Log entries",
+		.type	= FIO_OPT_INT,
+		.off1	= offsetof(struct thread_options, log_entries),
+		.help	= "Initial number of entries in a job IO log",
+		.def	= __fio_stringify(DEF_LOG_ENTRIES),
+		.minval	= DEF_LOG_ENTRIES,
+		.maxval	= MAX_LOG_ENTRIES,
+		.category = FIO_OPT_C_LOG,
+		.group	= FIO_OPT_G_INVALID,
+	},
 	{
 		.name	= "log_avg_msec",
 		.lname	= "Log averaging (msec)",
diff --git a/server.h b/server.h
index 44b8da12..25b6bbdc 100644
--- a/server.h
+++ b/server.h
@@ -48,7 +48,7 @@ struct fio_net_cmd_reply {
 };
 
 enum {
-	FIO_SERVER_VER			= 94,
+	FIO_SERVER_VER			= 95,
 
 	FIO_SERVER_MAX_FRAGMENT_PDU	= 1024,
 	FIO_SERVER_MAX_CMD_MB		= 2048,
diff --git a/stat.c b/stat.c
index cd35b114..e0dc99b6 100644
--- a/stat.c
+++ b/stat.c
@@ -2688,27 +2688,25 @@ static inline void add_stat_sample(struct io_stat *is, unsigned long long data)
  */
 static struct io_logs *get_new_log(struct io_log *iolog)
 {
-	size_t new_size, new_samples;
+	size_t new_samples;
 	struct io_logs *cur_log;
 
 	/*
 	 * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling
 	 * forever
 	 */
-	if (!iolog->cur_log_max)
-		new_samples = DEF_LOG_ENTRIES;
-	else {
+	if (!iolog->cur_log_max) {
+		new_samples = iolog->td->o.log_entries;
+	} else {
 		new_samples = iolog->cur_log_max * 2;
 		if (new_samples > MAX_LOG_ENTRIES)
 			new_samples = MAX_LOG_ENTRIES;
 	}
 
-	new_size = new_samples * log_entry_sz(iolog);
-
 	cur_log = smalloc(sizeof(*cur_log));
 	if (cur_log) {
 		INIT_FLIST_HEAD(&cur_log->list);
-		cur_log->log = malloc(new_size);
+		cur_log->log = calloc(new_samples, log_entry_sz(iolog));
 		if (cur_log->log) {
 			cur_log->nr_samples = 0;
 			cur_log->max_samples = new_samples;
diff --git a/thread_options.h b/thread_options.h
index 6e1a2cdd..8f4c8a59 100644
--- a/thread_options.h
+++ b/thread_options.h
@@ -377,6 +377,7 @@ struct thread_options {
 	fio_fp64_t zrt;
 	fio_fp64_t zrf;
 
+	unsigned int log_entries;
 	unsigned int log_prio;
 };
 
@@ -683,6 +684,7 @@ struct thread_options_pack {
 	int32_t max_open_zones;
 	uint32_t ignore_zone_limits;
 
+	uint32_t log_entries;
 	uint32_t log_prio;
 } __attribute__((packed));
-- 
2.25.1
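
Usage note: the workload from the commit message can also be expressed as a
job file with the new option applied. This is an illustrative sketch only;
the device path (/dev/sdg), the latency log name, and the log_entries value
of 65536 are taken from the example above and should be adapted to the
target setup and run length.

[prio-randread]
filename=/dev/sdg
random_generator=tausworthe64
ioscheduler=none
write_lat_log=randread.log
log_prio=1
log_entries=65536
rw=randread
bs=128k
ioengine=libaio
iodepth=32
direct=1
cmdprio_class=1
cmdprio_percentage=30
runtime=900

Passing the job file to fio in place of the command line options produces the
same workload, with the I/O log sized up front so no log growth occurs during
the run.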