}
run_fio() {
- local fio
+ local fio opts
fio=$(dirname "$0")/../../fio
- { echo; echo "fio $*"; echo; } >>"${logfile}.${test_number}"
+ # Prepend options common to every test invocation: keep fio's state files
+ # out of the cwd, forbid creating regular files (all tests target a block
+ # device) and raise output precision for the statistics checks.
+ opts=("--aux-path=/tmp" "--allow_file_create=0" \
+ "--significant_figures=10" "$@")
+ # Log the full, effective command line before running it.
+ { echo; echo "fio ${opts[*]}"; echo; } >>"${logfile}.${test_number}"
+ "${dynamic_analyzer[@]}" "$fio" "${opts[@]}"
- "${dynamic_analyzer[@]}" "$fio" "$@"
}
run_one_fio_job() {
# Check whether buffered writes are refused.
test1() {
run_fio --name=job1 --filename="$dev" --rw=write --direct=0 --bs=4K \
- --size="${zone_size}" \
+ --size="${zone_size}" --thread=1 \
--zonemode=zbd --zonesize="${zone_size}" 2>&1 |
tee -a "${logfile}.${test_number}" |
grep -q 'Using direct I/O is mandatory for writing to ZBD drives'
if [ -z "$is_zbd" ]; then
opts+=("--zonesize=${zone_size}")
fi
- run_fio "${opts[@]}" 2>&1 |
- tee -a "${logfile}.${test_number}" |
- grep -q 'No I/O performed'
+ run_fio "${opts[@]}" >> "${logfile}.${test_number}" 2>&1 || return $?
+ ! grep -q 'WRITE:' "${logfile}.${test_number}"
}
# Run fio against an empty zone. This causes fio to report "No I/O performed".
opts+=("--zonesize=${zone_size}")
fi
run_fio "${opts[@]}" >> "${logfile}.${test_number}" 2>&1 || return $?
- grep -q "No I/O performed" "${logfile}.${test_number}"
+ # A READ: statistics line only appears when fio actually transferred data,
+ # which should not happen when reading an empty zone on a zoned device.
+ grep -q 'READ:' "${logfile}.${test_number}"
rc=$?
if [ -n "$is_zbd" ]; then
- [ $rc = 0 ]
- else
+ # Zoned device: reading an empty zone must transfer nothing, so the
+ # READ: grep above must have failed. On a regular block device the read
+ # succeeds and READ: must be present.
[ $rc != 0 ]
+ else
+ [ $rc = 0 ]
fi
}
grep -q 'Specifying the zone size is mandatory for regular block devices with --zonemode=zbd'
}
-# Check whether fio handles --zonesize=1 correctly.
+# Check whether fio handles --zonesize=1 correctly for regular block devices.
test43() {
+ [ -n "$is_zbd" ] && return 0
read_one_block --zonemode=zbd --zonesize=1 |
grep -q 'zone size must be at least 512 bytes for --zonemode=zbd'
}
grep -q "fio: first I/O failed. If .* is a zoned block device, consider --zonemode=zbd"
}
+# Random write to sequential zones, libaio, 8 jobs, queue depth 64 per job
+test46() {
+ local size
+
+ # assumes run_fio_on_seq sizes each job at four zones — TODO confirm
+ # against run_fio_on_seq's definition; $size is only used for the
+ # written-bytes check below.
+ size=$((4 * zone_size))
+ run_fio_on_seq --ioengine=libaio --iodepth=64 --rw=randwrite --bs=4K \
+ --group_reporting=1 --numjobs=8 \
+ >> "${logfile}.${test_number}" 2>&1 || return $?
+ # 8 jobs, each expected to write $size bytes in total.
+ check_written $((size * 8)) || return $?
+}
+
+# Check whether fio handles --zonemode=zbd --zoneskip=1 correctly.
+test47() {
+ local bs
+
+ # Only meaningful on a real zoned device; skip otherwise.
+ [ -z "$is_zbd" ] && return 0
+ bs=$((logical_block_size))
+ # A 1-byte zoneskip can never be a multiple of the zone size, so the job
+ # must fail: invert the exit status, then verify fio's error message.
+ run_one_fio_job --ioengine=psync --rw=write --bs=$bs \
+ --zonemode=zbd --zoneskip=1 \
+ >> "${logfile}.${test_number}" 2>&1 && return 1
+ grep -q 'zoneskip 1 is not a multiple of the device zone size' "${logfile}.${test_number}"
+}
+
+# Multiple overlapping random write jobs for the same drive and with a
+# limited number of open zones. This is similar to test29, but uses libaio
+# to stress test zone locking.
+test48() {
+ # Declare every function variable local: the original left 'size' and
+ # 'fio' undeclared, leaking them into the global scope ('size' is not
+ # even read inside this function).
+ local fio i jobs=16 off opts=() size
+
+ off=$((first_sequential_zone_sector * 512 + 64 * zone_size))
+ size=$((16*zone_size))
+ [ -n "$is_zbd" ] && reset_zone "$dev" $((off / 512))
+ # Same common options run_fio() would inject, plus zbd debug output.
+ opts=("--aux-path=/tmp" "--allow_file_create=0" "--significant_figures=10")
+ opts+=("--debug=zbd")
+ opts+=("--ioengine=libaio" "--rw=randwrite" "--direct=1")
+ opts+=("--time_based" "--runtime=30")
+ opts+=("--zonemode=zbd" "--zonesize=${zone_size}")
+ opts+=("--max_open_zones=4")
+ # One job section per writer, all targeting the same offset to force
+ # contention on the zone locks.
+ for ((i=0;i<jobs;i++)); do
+ opts+=("--name=job$i" "--filename=$dev" "--offset=$off" "--bs=16K")
+ opts+=("--io_size=$zone_size" "--iodepth=256" "--thread=1")
+ opts+=("--group_reporting=1")
+ done
+
+ fio=$(dirname "$0")/../../fio
+
+ { echo; echo "fio ${opts[*]}"; echo; } >>"${logfile}.${test_number}"
+
+ # Kill fio if zone locking deadlocks: 45s covers --runtime=30 plus slack.
+ timeout -v -s KILL 45s \
+ "${dynamic_analyzer[@]}" "$fio" "${opts[@]}" \
+ >> "${logfile}.${test_number}" 2>&1 || return $?
+}
+
tests=()
dynamic_analyzer=()
reset_all_zones=
dev=$1
realdev=$(readlink -f "$dev")
basename=$(basename "$realdev")
-disk_size=$(($(<"/sys/block/$basename/size")*512))
+# Resolve the device's major:minor and read its size via /sys/dev/block so
+# that partition devices (which have no /sys/block/<name> entry) work too.
+major=$((0x$(stat -L -c '%t' "$realdev"))) || exit $?
+minor=$((0x$(stat -L -c '%T' "$realdev"))) || exit $?
+disk_size=$(($(<"/sys/dev/block/$major:$minor/size")*512))
+# When the target is a partition device, get basename of its holder device to
+# access sysfs path of the holder device
+if [[ -r "/sys/dev/block/$major:$minor/partition" ]]; then
+ realsysfs=$(readlink "/sys/dev/block/$major:$minor")
+ basename=$(basename "${realsysfs%/*}")
+fi
logical_block_size=$(<"/sys/block/$basename/queue/logical_block_size")
case "$(<"/sys/class/block/$basename/queue/zoned")" in
host-managed|host-aware)
esac
if [ "${#tests[@]}" = 0 ]; then
- for ((i=1;i<=45;i++)); do
- tests+=("$i")
- done
+ # Discover the testNN functions defined above instead of hard-coding the
+ # test count, so newly added tests are picked up automatically.
+ readarray -t tests < <(declare -F | grep "test[0-9]*" | \
+ tr -c -d "[:digit:]\n" | sort -n)
fi
logfile=$0.log
+# Counters for the pass/fail summary printed after the loop.
+passed=0
+failed=0
rc=0
for test_number in "${tests[@]}"; do
rm -f "${logfile}.${test_number}"
echo -n "Running test $test_number ... "
if eval "test$test_number"; then
status="PASS"
+ ((passed++))
else
status="FAIL"
+ ((failed++))
rc=1
fi
echo "$status"
echo "$status" >> "${logfile}.${test_number}"
done
+echo "$passed tests passed"
+if [ $failed -gt 0 ]; then
+ echo " and $failed tests failed"
+fi
exit $rc