summaryrefslogtreecommitdiff
path: root/io_u.c
diff options
context:
space:
mode:
authorgvkovai <gv.kovai@gmail.com>2017-09-12 11:17:40 -0400
committergvkovai <gv.kovai@gmail.com>2017-09-14 12:34:39 -0400
commit224b3093cc2106233dc449871cb3367e6029b1b0 (patch)
treedc1d4b312f5ce731a9e9e29ea7d809ef15879c00 /io_u.c
parentc6fa271e32f08f35d7fc25272e77c0f7ee17bfec (diff)
downloadfio-224b3093cc2106233dc449871cb3367e6029b1b0.tar.gz
fio-224b3093cc2106233dc449871cb3367e6029b1b0.tar.bz2
Fix zoning issues with seq-io and the randommap
The case of the zonerange < zonesize scenario was not handled correctly earlier. When zonesize > zonerange, IO must continue in the same zonerange for zonesize bytes for seq-io. For random IO, zonesize > zonerange leads to sequential IO after the first zonerange worth of IO is done when 'norandommap' is not set. In this case, the map needs to be reset for every zonerange worth of IO on a zone. <seqzoneread.fio> ===== [global] ioengine=libaio direct=1 time_based disk_util=0 continue_on_error=all rate_process=poisson write_iolog=offsetlog [db-dss1] bs=8K filesize=524288M zonesize=9M zonerange=3M zoneskip=1M filename=/dev/sdb rw=read iodepth=1 rate_iops=100 ====== sudo ./fio --runtime 120 --debug=file,io,blktrace --write_iops_log=/tmp/IOPS --write_lat_log=/tmp/LAT --status-interval=10 --output=/tmp/fio.out --output-format=json seqzoneread.fio See the issue for more details and plots that describe the issue and the fix. Fixes #450.
Diffstat (limited to 'io_u.c')
-rw-r--r--io_u.c56
1 file changed, 42 insertions, 14 deletions
diff --git a/io_u.c b/io_u.c
index db043e4a..41feeacf 100644
--- a/io_u.c
+++ b/io_u.c
@@ -850,6 +850,45 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u)
*io_u = NULL;
}
+static void __fill_io_u_zone(struct thread_data *td, struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+
+ /*
+ * See if it's time to switch to a new zone
+ */
+ if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
+ td->zone_bytes = 0;
+ f->file_offset += td->o.zone_range + td->o.zone_skip;
+
+ /*
+ * Wrap from the beginning, if we exceed the file size
+ */
+ if (f->file_offset >= f->real_file_size)
+ f->file_offset = f->real_file_size - f->file_offset;
+ f->last_pos[io_u->ddir] = f->file_offset;
+ td->io_skip_bytes += td->o.zone_skip;
+ }
+
+ /*
+ * If zone_size > zone_range, then maintain the same zone until
+ * zone_bytes >= zone_size.
+ */
+ if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) {
+ dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n",
+ f->file_offset, f->last_pos[io_u->ddir]);
+ f->last_pos[io_u->ddir] = f->file_offset;
+ }
+
+ /*
+ * For random: if 'norandommap' is not set and zone_size > zone_range,
+ * map needs to be reset as it's done with zone_range everytime.
+ */
+ if ((td->zone_bytes % td->o.zone_range) == 0) {
+ fio_file_reset(td, f);
+ }
+}
+
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
unsigned int is_random;
@@ -866,21 +905,10 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
goto out;
/*
- * See if it's time to switch to a new zone
+ * When file is zoned zone_range is always positive
*/
- if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
- struct fio_file *f = io_u->file;
-
- td->zone_bytes = 0;
- f->file_offset += td->o.zone_range + td->o.zone_skip;
-
- /*
- * Wrap from the beginning, if we exceed the file size
- */
- if (f->file_offset >= f->real_file_size)
- f->file_offset = f->real_file_size - f->file_offset;
- f->last_pos[io_u->ddir] = f->file_offset;
- td->io_skip_bytes += td->o.zone_skip;
+ if (td->o.zone_range) {
+ __fill_io_u_zone(td, io_u);
}
/*