Merge branch 'disable_opt' of https://github.com/sitsofe/fio
diff --git a/io_u.c b/io_u.c
index 852b98e9d0f5a5c30a3f886fd29ca11b1b074c42..61d09ba872e9c816ed0f07b8a07c39b32ccb34e0 100644
--- a/io_u.c
+++ b/io_u.c
@@ -163,7 +163,6 @@ static int __get_next_rand_offset_zoned_abs(struct thread_data *td,
 {
        struct zone_split_index *zsi;
        uint64_t lastb, send, stotal;
-       static int warned;
        unsigned int v;
 
        lastb = last_block(td, f, ddir);
@@ -192,10 +191,8 @@ bail:
         * Should never happen
         */
        if (send == -1U) {
-               if (!warned) {
+               if (!fio_did_warn(FIO_WARN_ZONED_BUG))
                        log_err("fio: bug in zoned generation\n");
-                       warned = 1;
-               }
                goto bail;
        } else if (send > lastb) {
                /*
@@ -223,7 +220,6 @@ static int __get_next_rand_offset_zoned(struct thread_data *td,
 {
        unsigned int v, send, stotal;
        uint64_t offset, lastb;
-       static int warned;
        struct zone_split_index *zsi;
 
        lastb = last_block(td, f, ddir);
@@ -248,10 +244,8 @@ bail:
         * Should never happen
         */
        if (send == -1U) {
-               if (!warned) {
+               if (!fio_did_warn(FIO_WARN_ZONED_BUG))
                        log_err("fio: bug in zoned generation\n");
-                       warned = 1;
-               }
                goto bail;
        }
 
@@ -922,6 +916,45 @@ void requeue_io_u(struct thread_data *td, struct io_u **io_u)
        *io_u = NULL;
 }
 
+static void __fill_io_u_zone(struct thread_data *td, struct io_u *io_u)
+{
+       struct fio_file *f = io_u->file;
+
+       /*
+        * See if it's time to switch to a new zone
+        */
+       if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
+               td->zone_bytes = 0;
+               f->file_offset += td->o.zone_range + td->o.zone_skip;
+
+               /*
+                * Wrap from the beginning, if we exceed the file size
+                */
+               if (f->file_offset >= f->real_file_size)
+                       f->file_offset = f->real_file_size - f->file_offset;
+               f->last_pos[io_u->ddir] = f->file_offset;
+               td->io_skip_bytes += td->o.zone_skip;
+       }
+
+       /*
+        * If zone_size > zone_range, then maintain the same zone until
+        * zone_bytes >= zone_size.
+        */
+       if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) {
+               dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n",
+                               f->file_offset, f->last_pos[io_u->ddir]);
+               f->last_pos[io_u->ddir] = f->file_offset;
+       }
+
+       /*
+        * For random: if 'norandommap' is not set and zone_size > zone_range,
+        * the map needs to be reset, as it's done with zone_range every time.
+        */
+       if ((td->zone_bytes % td->o.zone_range) == 0) {
+               fio_file_reset(td, f);
+       }
+}
+
 static int fill_io_u(struct thread_data *td, struct io_u *io_u)
 {
        unsigned int is_random;
@@ -938,21 +971,10 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
                goto out;
 
        /*
-        * See if it's time to switch to a new zone
+        * When the file is zoned, zone_range is always positive
         */
-       if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
-               struct fio_file *f = io_u->file;
-
-               td->zone_bytes = 0;
-               f->file_offset += td->o.zone_range + td->o.zone_skip;
-
-               /*
-                * Wrap from the beginning, if we exceed the file size
-                */
-               if (f->file_offset >= f->real_file_size)
-                       f->file_offset = f->real_file_size - f->file_offset;
-               f->last_pos[io_u->ddir] = f->file_offset;
-               td->io_skip_bytes += td->o.zone_skip;
+       if (td->o.zone_range) {
+               __fill_io_u_zone(td, io_u);
        }
 
        /*
@@ -990,7 +1012,7 @@ out:
        return 0;
 }
 
-static void __io_u_mark_map(unsigned int *map, unsigned int nr)
+static void __io_u_mark_map(uint64_t *map, unsigned int nr)
 {
        int idx = 0;
 
@@ -1893,7 +1915,8 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
 
                if (no_reduce && per_unit_log(td->iops_log))
                        add_iops_sample(td, io_u, bytes);
-       }
+       } else if (ddir_sync(idx) && !td->o.disable_clat)
+               add_sync_clat_sample(&td->ts, llnsec);
 
        if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
                uint32_t *info = io_u_block_info(td, io_u);
@@ -1931,6 +1954,12 @@ static void file_log_write_comp(const struct thread_data *td, struct fio_file *f
                f->last_write_idx = 0;
 }
 
+static bool should_account(struct thread_data *td)
+{
+       return ramp_time_over(td) && (td->runstate == TD_RUNNING ||
+                                          td->runstate == TD_VERIFYING);
+}
+
 static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
                         struct io_completion_data *icd)
 {
@@ -1959,15 +1988,17 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
        }
 
        if (ddir_sync(ddir)) {
-               td->last_was_sync = 1;
+               td->last_was_sync = true;
                if (f) {
                        f->first_write = -1ULL;
                        f->last_write = -1ULL;
                }
+               if (should_account(td))
+                       account_io_completion(td, io_u, icd, ddir, io_u->buflen);
                return;
        }
 
-       td->last_was_sync = 0;
+       td->last_was_sync = false;
        td->last_ddir = ddir;
 
        if (!io_u->error && ddir_rw(ddir)) {
@@ -1985,8 +2016,7 @@ static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
                if (ddir == DDIR_WRITE)
                        file_log_write_comp(td, f, io_u->offset, bytes);
 
-               if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
-                                          td->runstate == TD_VERIFYING))
+               if (should_account(td))
                        account_io_completion(td, io_u, icd, ddir, bytes);
 
                icd->bytes_done[ddir] += bytes;