+++ /dev/null
-; Keep adding 1024kb/s reading clients every 4 seconds
-[global]
-size=32m
-rw=read
-directory=tmp
-rate=1250
-ratemin=1024
-
-[file1]
-startdelay=0
-
-[file2]
-startdelay=4
-
-[file3]
-startdelay=8
-
-[file4]
-startdelay=12
-
-[file5]
-startdelay=16
-
-[file6]
-startdelay=20
-
-[file7]
-startdelay=24
-
-[file8]
-startdelay=28
-
-[file9]
-startdelay=32
-
-[file10]
-startdelay=36
-
-[file11]
-startdelay=40
-
-[file12]
-startdelay=44
-
-[file13]
-startdelay=48
-
-[file14]
-startdelay=52
-
-[file15]
-startdelay=56
-
-[file16]
-startdelay=60
-
-[file17]
-startdelay=64
-
-[file18]
-startdelay=68
-
-[file19]
-startdelay=72
-
-[file20]
-startdelay=76
-
-[file21]
-startdelay=80
-
-[file22]
-startdelay=84
-
-[file23]
-startdelay=88
-
-[file24]
-startdelay=92
-
-[file25]
-startdelay=96
-
-[file26]
-startdelay=100
-
-[file27]
-startdelay=104
-
-[file28]
-startdelay=108
-
-[file29]
-startdelay=112
-
-[file30]
-startdelay=116
-
-[file31]
-startdelay=120
-
-[file32]
-startdelay=124
-
--- /dev/null
+; Keep adding 1024kb/s reading clients every 4 seconds
+[global]
+size=32m
+rw=read
+directory=tmp
+rate=1250
+ratemin=1024
+
+[file1]
+startdelay=0
+
+[file2]
+startdelay=4
+
+[file3]
+startdelay=8
+
+[file4]
+startdelay=12
+
+[file5]
+startdelay=16
+
+[file6]
+startdelay=20
+
+[file7]
+startdelay=24
+
+[file8]
+startdelay=28
+
+[file9]
+startdelay=32
+
+[file10]
+startdelay=36
+
+[file11]
+startdelay=40
+
+[file12]
+startdelay=44
+
+[file13]
+startdelay=48
+
+[file14]
+startdelay=52
+
+[file15]
+startdelay=56
+
+[file16]
+startdelay=60
+
+[file17]
+startdelay=64
+
+[file18]
+startdelay=68
+
+[file19]
+startdelay=72
+
+[file20]
+startdelay=76
+
+[file21]
+startdelay=80
+
+[file22]
+startdelay=84
+
+[file23]
+startdelay=88
+
+[file24]
+startdelay=92
+
+[file25]
+startdelay=96
+
+[file26]
+startdelay=100
+
+[file27]
+startdelay=104
+
+[file28]
+startdelay=108
+
+[file29]
+startdelay=112
+
+[file30]
+startdelay=116
+
+[file31]
+startdelay=120
+
+[file32]
+startdelay=124
+
+++ /dev/null
-; Read 4 files with aio at different depths
-[global]
-ioengine=libaio
-buffered=0
-rw=randread
-bs=128k
-size=512m
-directory=/data1
-
-[file1]
-iodepth=4
-
-[file2]
-iodepth=32
-
-[file3]
-iodepth=8
-
-[file4]
-iodepth=16
--- /dev/null
+; Read 4 files with aio at different depths
+[global]
+ioengine=libaio
+buffered=0
+rw=randread
+bs=128k
+size=512m
+directory=/data1
+
+[file1]
+iodepth=4
+
+[file2]
+iodepth=32
+
+[file3]
+iodepth=8
+
+[file4]
+iodepth=16
+++ /dev/null
-; Read disk in zones of 256m/2g, generating a plot of that afterwards
-; should give a nice picture of the zoning of this drive
-
-[global]
-bs=64k
-direct=1
-rw=read
-ioengine=libaio
-iodepth=2
-zonesize=256m
-zoneskip=2g
-write_bw_log
-
-[/dev/sdb]
--- /dev/null
+; Read disk in zones of 256m/2g, generating a plot of that afterwards
+; should give a nice picture of the zoning of this drive
+
+[global]
+bs=64k
+direct=1
+rw=read
+ioengine=libaio
+iodepth=2
+zonesize=256m
+zoneskip=2g
+write_bw_log
+
+[/dev/sdb]
+++ /dev/null
-# Example usage of flows. The below will have roughly a 1:8 difference
-# between job2 and job1.
-[global]
-norandommap
-thread
-time_based
-runtime=30
-direct=1
-ioengine=libaio
-iodepth=256
-size=100g
-bs=8k
-filename=/tmp/testfile
-flow_watermark=100
-flow_sleep=1000
-
-[job2]
-numjobs=1
-rw=write
-flow=-8
-
-[job1]
-numjobs=1
-rw=randread
-flow=1
--- /dev/null
+# Example usage of flows. The below will have roughly a 1:8 difference
+# between job2 and job1.
+[global]
+norandommap
+thread
+time_based
+runtime=30
+direct=1
+ioengine=libaio
+iodepth=256
+size=100g
+bs=8k
+filename=/tmp/testfile
+flow_watermark=100
+flow_sleep=1000
+
+[job2]
+numjobs=1
+rw=write
+flow=-8
+
+[job1]
+numjobs=1
+rw=randread
+flow=1
+++ /dev/null
-; This job file works pretty similarly to running fsx-linux
-; with -r 4096 -w 4096 -Z -N 500000
-[file]
-ioengine=libaio
-iodepth=1
-rw=randrw
-size=256k
-bs=4k
-norandommap
-direct=1
-loops=500000
-rwmixcycle=40
--- /dev/null
+; This job file works pretty similarly to running fsx-linux
+; with -r 4096 -w 4096 -Z -N 500000
+[file]
+ioengine=libaio
+iodepth=1
+rw=randrw
+size=256k
+bs=4k
+norandommap
+direct=1
+loops=500000
+rwmixcycle=40
+++ /dev/null
-# This job file tries to mimic the Intel IOMeter File Server Access Pattern
-[global]
-description=Emulation of Intel IOmeter File Server Access Pattern
-
-[iometer]
-bssplit=512/10:1k/5:2k/5:4k/60:8k/2:16k/4:32k/4:64k/10
-rw=randrw
-rwmixread=80
-direct=1
-size=4g
-ioengine=libaio
-# IOMeter defines the server loads as the following:
-# iodepth=1 Linear
-# iodepth=4 Very Light
-# iodepth=8 Light
-# iodepth=64 Moderate
-# iodepth=256 Heavy
-iodepth=64
--- /dev/null
+# This job file tries to mimic the Intel IOMeter File Server Access Pattern
+[global]
+description=Emulation of Intel IOmeter File Server Access Pattern
+
+[iometer]
+bssplit=512/10:1k/5:2k/5:4k/60:8k/2:16k/4:32k/4:64k/10
+rw=randrw
+rwmixread=80
+direct=1
+size=4g
+ioengine=libaio
+# IOMeter defines the server loads as the following:
+# iodepth=1 Linear
+# iodepth=4 Very Light
+# iodepth=8 Light
+# iodepth=64 Moderate
+# iodepth=256 Heavy
+iodepth=64
+++ /dev/null
-# Example network job, just defines two clients that send/recv data
-[global]
-ioengine=net
-#Use hostname=/tmp.fio.sock for local unix domain sockets
-port=8888
-#Use =udp for UDP, =unix for local unix domain socket
-protocol=tcp
-bs=4k
-size=10g
-#set the below option to enable end-to-end data integrity tests
-#verify=md5
-
-[receiver]
-listen
-rw=read
-
-[sender]
-hostname=localhost
-startdelay=1
-rw=write
--- /dev/null
+# Example network job, just defines two clients that send/recv data
+[global]
+ioengine=net
+#Use hostname=/tmp.fio.sock for local unix domain sockets
+port=8888
+#Use =udp for UDP, =unix for local unix domain socket
+protocol=tcp
+bs=4k
+size=10g
+#set the below option to enable end-to-end data integrity tests
+#verify=md5
+
+[receiver]
+listen
+rw=read
+
+[sender]
+hostname=localhost
+startdelay=1
+rw=write
+++ /dev/null
-[global]
-bs=4k
-gtod_reduce=1
-
-[null]
-ioengine=null
-size=100g
-rw=randread
-norandommap
-time_based=0
--- /dev/null
+[global]
+bs=4k
+gtod_reduce=1
+
+[null]
+ioengine=null
+size=100g
+rw=randread
+norandommap
+time_based=0
+++ /dev/null
-# Example rdma client job
-[global]
-ioengine=rdma
-filename=[ip_addr]/[port]/[RDMA_WRITE/RDMA_READ/SEND]
-bs=1m
-size=100g
-
-[sender]
-rw=write
-iodepth=1
-iodepth_batch_complete=1
\ No newline at end of file
--- /dev/null
+# Example rdma client job
+[global]
+ioengine=rdma
+filename=[ip_addr]/[port]/[RDMA_WRITE/RDMA_READ/SEND]
+bs=1m
+size=100g
+
+[sender]
+rw=write
+iodepth=1
+iodepth_batch_complete=1
\ No newline at end of file
+++ /dev/null
-# Example rdma server job
-[global]
-ioengine=rdma
-filename=[ip_addr]/[port]
-bs=1m
-size=100g
-
-[receiver]
-rw=read
-iodepth=16
\ No newline at end of file
--- /dev/null
+# Example rdma server job
+[global]
+ioengine=rdma
+filename=[ip_addr]/[port]
+bs=1m
+size=100g
+
+[receiver]
+rw=read
+iodepth=16
\ No newline at end of file
+++ /dev/null
-# Do some important numbers on SSD drives, to gauge what kind of
-# performance you might get out of them.
-#
-# Sequential read and write speeds are tested, these are expected to be
-# high. Random reads should also be fast, random writes are where crap
-# drives are usually separated from the good drives.
-#
-# This uses a queue depth of 4. New SATA SSD's will support up to 32
-# in flight commands, so it may also be interesting to increase the queue
-# depth and compare. Note that most real-life usage will not see that
-# large of a queue depth, so 4 is more representative of normal use.
-#
-[global]
-bs=4k
-ioengine=libaio
-iodepth=4
-size=1g
-direct=1
-runtime=60
-directory=/mount-point-of-ssd
-filename=ssd.test.file
-
-[seq-read]
-rw=read
-stonewall
-
-[rand-read]
-rw=randread
-stonewall
-
-[seq-write]
-rw=write
-stonewall
-
-[rand-write]
-rw=randwrite
-stonewall
--- /dev/null
+# Do some important numbers on SSD drives, to gauge what kind of
+# performance you might get out of them.
+#
+# Sequential read and write speeds are tested, these are expected to be
+# high. Random reads should also be fast, random writes are where crap
+# drives are usually separated from the good drives.
+#
+# This uses a queue depth of 4. New SATA SSD's will support up to 32
+# in flight commands, so it may also be interesting to increase the queue
+# depth and compare. Note that most real-life usage will not see that
+# large of a queue depth, so 4 is more representative of normal use.
+#
+[global]
+bs=4k
+ioengine=libaio
+iodepth=4
+size=1g
+direct=1
+runtime=60
+directory=/mount-point-of-ssd
+filename=ssd.test.file
+
+[seq-read]
+rw=read
+stonewall
+
+[rand-read]
+rw=randread
+stonewall
+
+[seq-write]
+rw=write
+stonewall
+
+[rand-write]
+rw=randwrite
+stonewall
+++ /dev/null
-; writes 512 byte verification blocks until the disk is full,
-; then verifies written data
-[global]
-thread=1
-bs=64k
-direct=1
-ioengine=sync
-verify=meta
-verify_pattern=0xaa555aa5
-verify_interval=512
-
-[write-phase]
-filename=datafile.tmp ; or use a full disk, for example /dev/sda
-rw=write
-fill_device=1
-do_verify=0
-
-[verify-phase]
-stonewall
-create_serialize=0
-filename=datafile.tmp
-rw=read
-do_verify=1
--- /dev/null
+; writes 512 byte verification blocks until the disk is full,
+; then verifies written data
+[global]
+thread=1
+bs=64k
+direct=1
+ioengine=sync
+verify=meta
+verify_pattern=0xaa555aa5
+verify_interval=512
+
+[write-phase]
+filename=datafile.tmp ; or use a full disk, for example /dev/sda
+rw=write
+fill_device=1
+do_verify=0
+
+[verify-phase]
+stonewall
+create_serialize=0
+filename=datafile.tmp
+rw=read
+do_verify=1
+++ /dev/null
-; tiobench like setup, add more fX files between the stonewalls to
-; create more threads
-
-[global]
-direct=1
-size=512m
-bsrange=4k-4k
-timeout=60
-numjobs=4 ; 4 simultaneous threads for each job
-
-[f1]
-rw=write
-
-[f2]
-stonewall
-rw=randwrite
-
-[f3]
-stonewall
-rw=read
-
-[f4]
-stonewall
-rw=randread
--- /dev/null
+; tiobench like setup, add more fX files between the stonewalls to
+; create more threads
+
+[global]
+direct=1
+size=512m
+bsrange=4k-4k
+timeout=60
+numjobs=4 ; 4 simultaneous threads for each job
+
+[f1]
+rw=write
+
+[f2]
+stonewall
+rw=randwrite
+
+[f3]
+stonewall
+rw=read
+
+[f4]
+stonewall
+rw=randread