fio: add FIO_RO_NEEDS_RW_OPEN ioengine flag
diff --git a/zbd.c b/zbd.c
index b1fd6b4bb0ae23cda7dba13a3c0bf3577139600f..d1e469f665052987fd5ad55b77ba9161f7a2c7a3 100644
--- a/zbd.c
+++ b/zbd.c
@@ -70,6 +70,19 @@ static inline uint64_t zbd_zone_capacity_end(const struct fio_zone_info *z)
        return z->start + z->capacity;
 }
 
+/**
+ * zbd_zone_remainder - Return the number of bytes that are still available for
+ *                      writing before the zone gets full
+ * @z: zone info pointer.
+ */
+static inline uint64_t zbd_zone_remainder(struct fio_zone_info *z)
+{
+       if (z->wp >= zbd_zone_capacity_end(z))
+               return 0;
+
+       return zbd_zone_capacity_end(z) - z->wp;
+}
+
 /**
  * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
  * @f: file pointer.
@@ -83,8 +96,7 @@ static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
 {
        assert((required & 511) == 0);
 
-       return z->has_wp &&
-               z->wp + required > zbd_zone_capacity_end(z);
+       return z->has_wp && required > zbd_zone_remainder(z);
 }
 
 static void zone_lock(struct thread_data *td, const struct fio_file *f,
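
The new zbd_zone_remainder() helper centralizes the "bytes left before the zone is full" arithmetic that zbd_zone_full() and the open-zone checks previously open-coded against zbd_zone_capacity_end(). A minimal standalone sketch of the same arithmetic, using a simplified stand-in for struct fio_zone_info and hypothetical zone numbers (not fio's real definitions):

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for the fields zbd_zone_remainder() reads. */
struct zone_sketch {
        uint64_t start;         /* first byte of the zone */
        uint64_t capacity;      /* writable bytes in the zone */
        uint64_t wp;            /* current write pointer (absolute offset) */
};

/* Same arithmetic as zbd_zone_remainder()/zbd_zone_capacity_end(). */
static uint64_t zone_remainder(const struct zone_sketch *z)
{
        uint64_t cap_end = z->start + z->capacity;

        return z->wp >= cap_end ? 0 : cap_end - z->wp;
}

int main(void)
{
        /* Hypothetical zone: 192 MiB capacity, write pointer 1 MiB short. */
        struct zone_sketch z = {
                .start = 0,
                .capacity = 192ULL << 20,
                .wp = 191ULL << 20,
        };
        uint64_t min_bs = 1ULL << 20;   /* 1 MiB minimum block size */

        /* 1 MiB left: a min_bs write still fits, so the zone is not full. */
        assert(zone_remainder(&z) == min_bs);
        assert(!(min_bs > zone_remainder(&z)));  /* zbd_zone_full() == false */

        /* Advance the write pointer to the capacity limit: zone is full. */
        z.wp += min_bs;
        assert(zone_remainder(&z) == 0);
        assert(min_bs > zone_remainder(&z));     /* zbd_zone_full() == true */

        return 0;
}
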
@@ -279,7 +291,6 @@ static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
        pthread_mutex_unlock(&f->zbd_info->mutex);
 
        z->wp = z->start;
-       z->verify_block = 0;
 
        td->ts.nr_zone_resets++;
 
@@ -322,6 +333,44 @@ static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
        z->open = 0;
 }
 
+/**
+ * zbd_finish_zone - finish the specified zone
+ * @td: FIO thread data.
+ * @f: FIO file for which to finish a zone.
+ * @z: Zone to finish.
+ *
+ * Transition the zone pointed to by @z from the open or closed condition to
+ * the full condition, and advance its write pointer to the end of the zone.
+ */
+static int zbd_finish_zone(struct thread_data *td, struct fio_file *f,
+                          struct fio_zone_info *z)
+{
+       uint64_t offset = z->start;
+       uint64_t length = f->zbd_info->zone_size;
+       int ret = 0;
+
+       switch (f->zbd_info->model) {
+       case ZBD_HOST_AWARE:
+       case ZBD_HOST_MANAGED:
+               if (td->io_ops && td->io_ops->finish_zone)
+                       ret = td->io_ops->finish_zone(td, f, offset, length);
+               else
+                       ret = blkzoned_finish_zone(td, f, offset, length);
+               break;
+       default:
+               break;
+       }
+
+       if (ret < 0) {
+               td_verror(td, errno, "finish zone failed");
+               log_err("%s: finish zone at sector %"PRIu64" failed (%d).\n",
+                       f->file_name, offset >> 9, errno);
+       } else {
+               z->wp = (z+1)->start;
+       }
+
+       return ret;
+}
+
 /**
  * zbd_reset_zones - Reset a range of zones.
  * @td: fio thread data.
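
When the ioengine provides no ->finish_zone() callback, the helper falls back to blkzoned_finish_zone(). On recent Linux kernels, finishing a zone of a zoned block device can be done with the BLKFINISHZONE ioctl from <linux/blkzoned.h>; the sketch below only illustrates that mechanism and is not fio's oslib implementation. The device path and zone size are hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkzoned.h>

/*
 * Move the write pointer of one zone to the end of the zone so that the
 * zone transitions to the full condition. @offset and @length are in bytes
 * and must be 512-byte aligned.
 */
static int finish_zone_ioctl(int fd, unsigned long long offset,
                             unsigned long long length)
{
        struct blk_zone_range range = {
                .sector = offset >> 9,          /* bytes -> 512-byte sectors */
                .nr_sectors = length >> 9,
        };

        return ioctl(fd, BLKFINISHZONE, &range);
}

int main(void)
{
        /* Hypothetical zoned block device and zone size. */
        const unsigned long long zone_size = 256ULL << 20;
        int fd = open("/dev/nullb0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Finish the first zone of the device. */
        if (finish_zone_ioctl(fd, 0, zone_size) < 0)
                perror("BLKFINISHZONE");
        close(fd);
        return 0;
}
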
@@ -440,7 +489,7 @@ static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
                 * already in-flight, handle it as a full zone instead of an
                 * open zone.
                 */
-               if (z->wp >= zbd_zone_capacity_end(z))
+               if (!zbd_zone_remainder(z))
                        res = false;
                goto out;
        }
@@ -466,7 +515,7 @@ out:
        return res;
 }
 
-/* Verify whether direct I/O is used for all host-managed zoned drives. */
+/* Verify whether direct I/O is used for all host-managed zoned block drives. */
 static bool zbd_using_direct_io(void)
 {
        struct thread_data *td;
@@ -477,7 +526,7 @@ static bool zbd_using_direct_io(void)
                if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
                        continue;
                for_each_file(td, f, j) {
-                       if (f->zbd_info &&
+                       if (f->zbd_info && f->filetype == FIO_TYPE_BLOCK &&
                            f->zbd_info->model == ZBD_HOST_MANAGED)
                                return false;
                }
@@ -602,7 +651,7 @@ static bool zbd_verify_bs(void)
 {
        struct thread_data *td;
        struct fio_file *f;
-       int i, j, k;
+       int i, j;
 
        for_each_td(td, i) {
                if (td_trim(td) &&
@@ -624,15 +673,6 @@ static bool zbd_verify_bs(void)
                                         zone_size);
                                return false;
                        }
-                       for (k = 0; k < FIO_ARRAY_SIZE(td->o.bs); k++) {
-                               if (td->o.verify != VERIFY_NONE &&
-                                   zone_size % td->o.bs[k] != 0) {
-                                       log_info("%s: block size %llu is not a divisor of the zone size %"PRIu64"\n",
-                                                f->file_name, td->o.bs[k],
-                                                zone_size);
-                                       return false;
-                               }
-                       }
                }
        }
        return true;
@@ -1044,6 +1084,11 @@ int zbd_setup_files(struct thread_data *td)
        if (!zbd_verify_bs())
                return 1;
 
+       if (td->o.experimental_verify) {
+               log_err("zonemode=zbd does not support experimental verify\n");
+               return 1;
+       }
+
        for_each_file(td, f, i) {
                struct zoned_block_device_info *zbd = f->zbd_info;
                struct fio_zone_info *z;
@@ -1208,6 +1253,7 @@ void zbd_file_reset(struct thread_data *td, struct fio_file *f)
 {
        struct fio_zone_info *zb, *ze;
        uint64_t swd;
+       bool verify_data_left = false;
 
        if (!f->zbd_info || !td_write(td))
                return;
@@ -1224,8 +1270,16 @@ void zbd_file_reset(struct thread_data *td, struct fio_file *f)
         * writing any data to avoid that a zone reset has to be issued while
         * writing data, which causes data loss.
         */
-       if (td->o.verify != VERIFY_NONE && td->runstate != TD_VERIFYING)
-               zbd_reset_zones(td, f, zb, ze);
+       if (td->o.verify != VERIFY_NONE) {
+               verify_data_left = td->runstate == TD_VERIFYING ||
+                       td->io_hist_len || td->verify_batch;
+               if (td->io_hist_len && td->o.verify_backlog)
+                       verify_data_left =
+                               td->io_hist_len % td->o.verify_backlog;
+               if (!verify_data_left)
+                       zbd_reset_zones(td, f, zb, ze);
+       }
+
        zbd_reset_write_cnt(td, f);
 }
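
zbd_file_reset() now skips the initial zone reset whenever verification still has data to check, so pending verify entries are not invalidated by rewinding write pointers. With verify_backlog=N, fio verifies after every N logged writes, so a nonzero io_hist_len % N means some logged writes are still unverified. A self-contained sketch of that decision, with the relevant thread state passed as plain parameters (an illustration, not fio's struct thread_data):

#include <assert.h>
#include <stdbool.h>

/*
 * Decide whether unverified write data may still be pending; if so, the
 * zones must not be reset. Mirrors the condition added to zbd_file_reset().
 */
static bool has_verify_data_left(bool verifying, unsigned int io_hist_len,
                                 unsigned int verify_batch,
                                 unsigned int verify_backlog)
{
        if (io_hist_len && verify_backlog)
                return io_hist_len % verify_backlog != 0;

        return verifying || io_hist_len || verify_batch;
}

int main(void)
{
        /* verify_backlog=10, 25 writes logged: 5 of them are unverified. */
        assert(has_verify_data_left(false, 25, 0, 10));

        /* verify_backlog=10, 20 writes logged: the backlog is fully verified. */
        assert(!has_verify_data_left(false, 20, 0, 10));

        /* No backlog, but a write history exists: the verify phase is pending. */
        assert(has_verify_data_left(false, 20, 0, 0));

        /* Nothing written, not verifying: safe to reset the zones. */
        assert(!has_verify_data_left(false, 0, 0, 10));

        return 0;
}
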
 
@@ -1368,7 +1422,7 @@ found_candidate_zone:
        /* Both z->mutex and zbdi->mutex are held. */
 
 examine_zone:
-       if (z->wp + min_bs <= zbd_zone_capacity_end(z)) {
+       if (zbd_zone_remainder(z) >= min_bs) {
                pthread_mutex_unlock(&zbdi->mutex);
                goto out;
        }
@@ -1433,7 +1487,7 @@ retry:
                z = zbd_get_zone(f, zone_idx);
 
                zone_lock(td, f, z);
-               if (z->wp + min_bs <= zbd_zone_capacity_end(z))
+               if (zbd_zone_remainder(z) >= min_bs)
                        goto out;
                pthread_mutex_lock(&zbdi->mutex);
        }
@@ -1476,42 +1530,6 @@ out:
        return z;
 }
 
-/* The caller must hold z->mutex. */
-static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
-                                                   struct io_u *io_u,
-                                                   struct fio_zone_info *z)
-{
-       const struct fio_file *f = io_u->file;
-       const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
-
-       if (!zbd_open_zone(td, f, z)) {
-               zone_unlock(z);
-               z = zbd_convert_to_open_zone(td, io_u);
-               assert(z);
-       }
-
-       if (z->verify_block * min_bs >= z->capacity) {
-               log_err("%s: %d * %"PRIu64" >= %"PRIu64"\n",
-                       f->file_name, z->verify_block, min_bs, z->capacity);
-               /*
-                * If the assertion below fails during a test run, adding
-                * "--experimental_verify=1" to the command line may help.
-                */
-               assert(false);
-       }
-
-       io_u->offset = z->start + z->verify_block * min_bs;
-       if (io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
-               log_err("%s: %llu + %llu >= %"PRIu64"\n",
-                       f->file_name, io_u->offset, io_u->buflen,
-                       zbd_zone_capacity_end(z));
-               assert(false);
-       }
-       z->verify_block += io_u->buflen / min_bs;
-
-       return z;
-}
-
 /*
  * Find another zone which has @min_bytes of readable data. Search in zones
  * @zb + 1 .. @zl. For random workload, also search in zones @zb - 1 .. @zf.
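
With zbd_replay_write_order() removed, verify reads no longer reconstruct offsets as verify_block * min_bs (which is also why the zone-size divisor check in zbd_verify_bs() and the verify_block reset in zbd_reset_zone() are dropped); they are instead taken from the recorded write history via get_next_verify(). A purely illustrative sketch of that history-replay idea, using hypothetical types rather than fio's io_u history:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical record of one completed write, stored in completion order. */
struct write_record {
        uint64_t offset;
        uint32_t len;
};

/*
 * Illustrative replay: verify reads walk the recorded history, so any mix
 * of block sizes that was written can be verified, and the read offset no
 * longer has to be reconstructible as verify_block * min_bs.
 */
static const struct write_record *next_verify(const struct write_record *hist,
                                              size_t nr, size_t *cursor)
{
        return *cursor < nr ? &hist[(*cursor)++] : NULL;
}

int main(void)
{
        const struct write_record hist[] = {
                { .offset = 0,     .len = 12288 },      /* 12 KiB write */
                { .offset = 12288, .len = 4096 },       /* 4 KiB write */
        };
        size_t cursor = 0;
        const struct write_record *rec;

        /* Each entry would become one verify read of rec->len at rec->offset. */
        while ((rec = next_verify(hist, sizeof(hist) / sizeof(hist[0]), &cursor)))
                assert(rec->len % 4096 == 0);

        return 0;
}
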
@@ -1862,10 +1880,8 @@ enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
 
        switch (io_u->ddir) {
        case DDIR_READ:
-               if (td->runstate == TD_VERIFYING && td_write(td)) {
-                       zb = zbd_replay_write_order(td, io_u, zb);
+               if (td->runstate == TD_VERIFYING && td_write(td))
                        goto accept;
-               }
 
                /*
                 * Check that there is enough written data in the zone to do an
@@ -1941,6 +1957,33 @@ enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
                        goto eof;
                }
 
+retry:
+               if (zbd_zone_remainder(zb) > 0 &&
+                   zbd_zone_remainder(zb) < min_bs) {
+                       pthread_mutex_lock(&f->zbd_info->mutex);
+                       zbd_close_zone(td, f, zb);
+                       pthread_mutex_unlock(&f->zbd_info->mutex);
+                       dprint(FD_ZBD,
+                              "%s: finish zone %d\n",
+                              f->file_name, zbd_zone_idx(f, zb));
+                       io_u_quiesce(td);
+                       zbd_finish_zone(td, f, zb);
+                       if (zbd_zone_idx(f, zb) + 1 >= f->max_zone) {
+                               if (!td_random(td))
+                                       goto eof;
+                       }
+                       zone_unlock(zb);
+
+                       /* Find the next write pointer zone */
+                       do {
+                               zb++;
+                               if (zbd_zone_idx(f, zb) >= f->max_zone)
+                                       zb = zbd_get_zone(f, f->min_zone);
+                       } while (!zb->has_wp);
+
+                       zone_lock(td, f, zb);
+               }
+
                if (!zbd_open_zone(td, f, zb)) {
                        zone_unlock(zb);
                        zb = zbd_convert_to_open_zone(td, io_u);
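
The retry block covers a zone whose remaining capacity is positive but smaller than the minimum write block size: no min_bs-sized write fits without crossing the capacity limit, so the zone is closed, explicitly finished, and the next write-pointer zone is tried. A tiny sketch of the trigger condition with hypothetical numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical zone: 192 MiB capacity, write pointer 4 KiB short. */
        uint64_t capacity  = 192ULL << 20;
        uint64_t wp        = capacity - 4096;   /* zone-relative write pointer */
        uint64_t min_bs    = 64ULL << 10;       /* 64 KiB minimum block size */
        uint64_t remainder = capacity - wp;     /* 4 KiB left before full */

        /*
         * remainder > 0 but < min_bs: the zone is neither full nor writable
         * with a min_bs-sized block, so the patch finishes it instead of
         * leaving it open with unusable space.
         */
        assert(remainder > 0 && remainder < min_bs);

        return 0;
}
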
@@ -1951,6 +1994,10 @@ enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
                        }
                }
 
+               if (zbd_zone_remainder(zb) > 0 &&
+                   zbd_zone_remainder(zb) < min_bs)
+                       goto retry;
+
                /* Check whether the zone reset threshold has been exceeded */
                if (td->o.zrf.u.f) {
                        if (zbdi->wp_sectors_with_data >= f->io_size * td->o.zrt.u.f &&
@@ -1960,7 +2007,19 @@ enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
 
                /* Reset the zone pointer if necessary */
                if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
-                       assert(td->o.verify == VERIFY_NONE);
+                       if (td->o.verify != VERIFY_NONE) {
+                               /*
+                                * Unset io-u->file to tell get_next_verify()
+                                * Unset io_u->file to tell get_next_verify()
+                                * that this I/O is not a requeue.
+                               io_u->file = NULL;
+                               if (!get_next_verify(td, io_u)) {
+                                       zone_unlock(zb);
+                                       return io_u_accept;
+                               }
+                               io_u->file = f;
+                       }
+
                        /*
                         * Since previous write requests may have been submitted
                         * asynchronously and since we will submit the zone