From b22989b9f9349b3c1d1c41846ab27ff0914bd6de Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 17 Jul 2009 22:29:23 +0200
Subject: [PATCH] Get rid of KiB vs KB distinction

Confuses more than it does good, drop it and default to just
using KB, MB, etc.

Signed-off-by: Jens Axboe
---
 HOWTO              | 22 +++++++++++-----------
 README             |  4 ++--
 eta.c              |  2 +-
 filesetup.c        |  2 +-
 fio.1              |  6 +++---
 fio.c              |  2 +-
 fio_generate_plots |  2 +-
 memory.c           |  2 +-
 stat.c             |  6 +++---
 9 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/HOWTO b/HOWTO
index 5099c83c..2d155aa0 100644
--- a/HOWTO
+++ b/HOWTO
@@ -112,7 +112,7 @@ section residing above it. If the first character in a line is a ';' or a
 '#', the entire line is discarded as a comment.
 
 So let's look at a really simple job file that defines two processes, each
-randomly reading from a 128MiB file.
+randomly reading from a 128MB file.
 
 ; -- start job file --
 [global]
@@ -150,9 +150,9 @@ numjobs=4
 
 Here we have no global section, as we only have one job defined anyway.
 We want to use async io here, with a depth of 4 for each file. We also
-increased the buffer size used to 32KiB and define numjobs to 4 to
+increased the buffer size used to 32KB and define numjobs to 4 to
 fork 4 identical jobs. The result is 4 processes each randomly writing
-to their own 64MiB file. Instead of using the above job file, you could
+to their own 64MB file. Instead of using the above job file, you could
 have given the parameters on the command line. For this case, you would
 specify:
 
@@ -691,7 +691,7 @@ mem=str	Fio can use various types of memory as the io unit buffer.
		that for shmhuge and mmaphuge to work, the system must have
		free huge pages allocated. This can normally be checked
		and set by reading/writing /proc/sys/vm/nr_hugepages on a
-		Linux system. Fio assumes a huge page is 4MiB in size. So
+		Linux system. Fio assumes a huge page is 4MB in size. So
		to calculate the number of huge pages you need for a given
		job file, add up the io depth of all jobs (normally one unless
		iodepth= is used) and multiply by the maximum bs set. Then
@@ -715,7 +715,7 @@ iomem_align=int	This indiciates the memory alignment of the IO memory buffers.
 
 hugepage-size=int
	Defines the size of a huge page. Must at least be equal
-	to the system setting, see /proc/meminfo. Defaults to 4MiB.
+	to the system setting, see /proc/meminfo. Defaults to 4MB.
	Should probably always be a multiple of megabytes, so using
	hugepage-size=Xm is the preferred way to set this to avoid
	setting a non-pow-2 bad value.
@@ -1005,10 +1005,10 @@ each thread, group of threads, and disks in that order.
 For each data direction, the output looks like:
 
 Client1 (g=0): err= 0:
-  write: io=    32MiB, bw=   666KiB/s, runt= 50320msec
+  write: io=    32MB, bw=   666KB/s, runt= 50320msec
     slat (msec): min=    0, max=  136, avg= 0.03, stdev= 1.92
     clat (msec): min=    0, max=  631, avg=48.50, stdev=86.82
-    bw (KiB/s) : min=    0, max= 1196, per=51.00%, avg=664.02, stdev=681.68
+    bw (KB/s)  : min=    0, max= 1196, per=51.00%, avg=664.02, stdev=681.68
   cpu        : usr=1.49%, sys=0.25%, ctx=7969, majf=0, minf=17
   IO depths    : 1=0.1%, 2=0.3%, 4=0.5%, 8=99.0%, 16=0.0%, 32=0.0%, >32=0.0%
      submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
@@ -1068,8 +1068,8 @@ After each client has been listed, the group statistics are printed.
 They will look like this:
 
 Run status group 0 (all jobs):
-   READ: io=64MiB, aggrb=22178, minb=11355, maxb=11814, mint=2840msec, maxt=2955msec
-  WRITE: io=64MiB, aggrb=1302, minb=666, maxb=669, mint=50093msec, maxt=50320msec
+   READ: io=64MB, aggrb=22178, minb=11355, maxb=11814, mint=2840msec, maxt=2955msec
+  WRITE: io=64MB, aggrb=1302, minb=666, maxb=669, mint=50093msec, maxt=50320msec
 
 For each data direction, it prints:
 
@@ -1112,12 +1112,12 @@ Split up, the format is as follows:
 
	jobname, groupid, error
	READ status:
-		KiB IO, bandwidth (KiB/sec), runtime (msec)
+		KB IO, bandwidth (KB/sec), runtime (msec)
		Submission latency: min, max, mean, deviation
		Completion latency: min, max, mean, deviation
		Bw: min, max, aggregate percentage of total, mean, deviation
	WRITE status:
-		KiB IO, bandwidth (KiB/sec), runtime (msec)
+		KB IO, bandwidth (KB/sec), runtime (msec)
		Submission latency: min, max, mean, deviation
		Completion latency: min, max, mean, deviation
		Bw: min, max, aggregate percentage of total, mean, deviation
diff --git a/README b/README
index 480f1d0e..d5235de3 100644
--- a/README
+++ b/README
@@ -202,8 +202,8 @@ The job file parameters are:
		also include k/m postfix.
	direct=x	1 for direct IO, 0 for buffered IO
	thinktime=x	"Think" x usec after each io
-	rate=x		Throttle rate to x KiB/sec
-	ratemin=x	Quit if rate of x KiB/sec can't be met
+	rate=x		Throttle rate to x KB/sec
+	ratemin=x	Quit if rate of x KB/sec can't be met
	ratecycle=x	ratemin averaged over x msecs
	cpumask=x	Only allow job to run on CPUs defined by mask.
	cpus_allowed=x	Like 'cpumask', but allow text setting of CPU affinity.
diff --git a/eta.c b/eta.c
index 9573e8a6..6629c29d 100644
--- a/eta.c
+++ b/eta.c
@@ -336,7 +336,7 @@ void print_thread_status(void)
		mr = num2str(m_rate, 4, 0, 1);
		tr = num2str(t_rate, 4, 0, 1);
 
-		printf(", CR=%s/%s KiB/s", tr, mr);
+		printf(", CR=%s/%s KB/s", tr, mr);
		free(tr);
		free(mr);
	} else if (m_iops || t_iops)
diff --git a/filesetup.c b/filesetup.c
index 1a5a7ecc..d7c83a79 100644
--- a/filesetup.c
+++ b/filesetup.c
@@ -611,7 +611,7 @@ int setup_files(struct thread_data *td)
		temp_stall_ts = 1;
		if (!terse_output)
			log_info("%s: Laying out IO file(s) (%u file(s) /"
-				 " %LuMiB)\n", td->o.name, need_extend,
+				 " %LuMB)\n", td->o.name, need_extend,
				 extend_size >> 20);
 
		for_each_file(td, f, i) {
diff --git a/fio.1 b/fio.1
index fa54763d..f4a3293a 100644
--- a/fio.1
+++ b/fio.1
@@ -523,7 +523,7 @@ sum of the \fBiomem_align\fR and \fBbs\fR used.
 .TP
 .BI hugepage\-size \fR=\fPint
 Defines the size of a huge page. Must be at least equal to the system setting.
-Should be a multiple of 1MiB. Default: 4MiB.
+Should be a multiple of 1MB. Default: 4MB.
 .TP
 .B exitall
 Terminate all jobs when one finishes. Default: wait for each job to finish.
@@ -871,7 +871,7 @@ semicolon-delimited format suitable for scripted use. The fields are:
 .P
 Read status:
 .RS
-.B KiB I/O, bandwidth \fR(KiB/s)\fP, runtime \fR(ms)\fP
+.B KB I/O, bandwidth \fR(KB/s)\fP, runtime \fR(ms)\fP
 .P
 Submission latency:
 .RS
@@ -889,7 +889,7 @@ Bandwidth:
 .P
 Write status:
 .RS
-.B KiB I/O, bandwidth \fR(KiB/s)\fP, runtime \fR(ms)\fP
+.B KB I/O, bandwidth \fR(KB/s)\fP, runtime \fR(ms)\fP
 .P
 Submission latency:
 .RS
diff --git a/fio.c b/fio.c
index fc6dd8ae..ba9e384b 100644
--- a/fio.c
+++ b/fio.c
@@ -223,7 +223,7 @@ static int __check_min_rate(struct thread_data *td, struct timeval *now,
			if (rate < ratemin ||
			    bytes < td->rate_bytes[ddir]) {
				log_err("%s: min rate %u not met, got"
-					" %luKiB/sec\n", td->o.name,
+					" %luKB/sec\n", td->o.name,
					ratemin, rate);
				return 1;
			}
diff --git a/fio_generate_plots b/fio_generate_plots
index 9b1e1ca5..4e2cb08e 100755
--- a/fio_generate_plots
+++ b/fio_generate_plots
@@ -30,7 +30,7 @@ done
 
 if [ "$PLOT_LINE"x != "x" ]; then
	echo Making bw logs
-	echo "set title 'Bandwidth - $TITLE'; set xlabel 'time (msec)'; set ylabel 'KiB/sec'; set terminal png; set output '$TITLE-bw.png'; plot " $PLOT_LINE | $GNUPLOT -
+	echo "set title 'Bandwidth - $TITLE'; set xlabel 'time (msec)'; set ylabel 'KB/sec'; set terminal png; set output '$TITLE-bw.png'; plot " $PLOT_LINE | $GNUPLOT -
 fi
 
 PLOT_LINE=""
diff --git a/memory.c b/memory.c
index 00339e42..9b49d398 100644
--- a/memory.c
+++ b/memory.c
@@ -39,7 +39,7 @@ int fio_pin_memory(void)
	if (phys_mem) {
		if ((mlock_size + 128 * 1024 * 1024) > phys_mem) {
			mlock_size = phys_mem - 128 * 1024 * 1024;
-			log_info("fio: limiting mlocked memory to %lluMiB\n",
+			log_info("fio: limiting mlocked memory to %lluMB\n",
				 mlock_size >> 20);
		}
	}
diff --git a/stat.c b/stat.c
index e1af59ed..7319b9c8 100644
--- a/stat.c
+++ b/stat.c
@@ -67,7 +67,7 @@ static void show_group_stats(struct group_run_stats *rs, int id)
		p3 = num2str(rs->min_bw[i], 6, 1024, 1);
		p4 = num2str(rs->max_bw[i], 6, 1024, 1);
 
-		log_info("%s: io=%siB, aggrb=%siB/s, minb=%siB/s, maxb=%siB/s,"
+		log_info("%s: io=%sB, aggrb=%sB/s, minb=%sB/s, maxb=%sB/s,"
			 " mint=%llumsec, maxt=%llumsec\n", ddir_str[i], p1, p2,
			 p3, p4, rs->min_run[i], rs->max_run[i]);
 
@@ -166,7 +166,7 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
	iops = (1000 * ts->total_io_u[ddir]) / runt;
	iops_p = num2str(iops, 6, 1, 0);
 
-	log_info("  %s: io=%siB, bw=%siB/s, iops=%s, runt=%6lumsec\n",
+	log_info("  %s: io=%sB, bw=%sB/s, iops=%s, runt=%6lumsec\n",
		 ddir_str[ddir], io_p, bw_p, iops_p,
		 ts->runtime[ddir]);
 
@@ -210,7 +210,7 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
		double p_of_agg;
 
		p_of_agg = mean * 100 / (double) rs->agg[ddir];
-		log_info("    bw (KiB/s) : min=%5lu, max=%5lu, per=%3.2f%%,"
+		log_info("    bw (KB/s)  : min=%5lu, max=%5lu, per=%3.2f%%,"
			 " avg=%5.02f, stdev=%5.02f\n", min, max, p_of_agg,
			 mean, dev);
	}
-- 
2.25.1
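
As a quick illustration of the huge-page arithmetic described in the HOWTO
hunk above (add up the io depth of all jobs, multiply by the maximum bs set,
and size that against the 4MB huge page fio assumes), here is a minimal
standalone C sketch. The helper huge_pages_needed and the example job values
are hypothetical, chosen for illustration; they are not part of fio itself.

#include <stdio.h>

#define HUGE_PAGE_SIZE	(4UL * 1024 * 1024)	/* the 4MB page size fio assumes */

/*
 * Huge pages needed for one job: io depth times the maximum bs set,
 * rounded up to whole pages. Multiply the result by numjobs when
 * several identical jobs are forked.
 */
static unsigned long huge_pages_needed(unsigned long iodepth,
				       unsigned long max_bs)
{
	unsigned long bytes = iodepth * max_bs;

	return (bytes + HUGE_PAGE_SIZE - 1) / HUGE_PAGE_SIZE;
}

int main(void)
{
	/* e.g. one job with iodepth=8 and bs=1m needs 8MB, i.e. 2 huge pages */
	printf("%lu\n", huge_pages_needed(8, 1024 * 1024));
	return 0;
}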