+static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
+{
+ return (uint64_t)(offset - f->file_offset) < f->io_size;
+}
+
+static inline unsigned int zbd_zone_idx(const struct fio_file *f,
+ struct fio_zone_info *zone)
+{
+ return zone - f->zbd_info->zone_info;
+}
+
+/**
+ * zbd_offset_to_zone_idx - convert an offset into a zone number
+ * @f: file pointer.
+ * @offset: offset in bytes. If this offset is in the first zone_size bytes
+ * past the disk size then the index of the sentinel is returned.
+ */
+static unsigned int zbd_offset_to_zone_idx(const struct fio_file *f,
+ uint64_t offset)
+{
+ uint32_t zone_idx;
+
+ if (f->zbd_info->zone_size_log2 > 0)
+ zone_idx = offset >> f->zbd_info->zone_size_log2;
+ else
+ zone_idx = offset / f->zbd_info->zone_size;
+
+ return min(zone_idx, f->zbd_info->nr_zones);
+}
+
+/**
+ * zbd_zone_end - Return zone end location
+ * @z: zone info pointer.
+ */
+static inline uint64_t zbd_zone_end(const struct fio_zone_info *z)
+{
+ return (z+1)->start;
+}
+
+/**
+ * zbd_zone_capacity_end - Return zone capacity limit end location
+ * @z: zone info pointer.
+ */
+static inline uint64_t zbd_zone_capacity_end(const struct fio_zone_info *z)
+{
+ return z->start + z->capacity;
+}
+
+/**
+ * zbd_zone_remainder - Return the number of bytes that are still available for
+ * writing before the zone gets full
+ * @z: zone info pointer.
+ */
+static inline uint64_t zbd_zone_remainder(struct fio_zone_info *z)
+{
+ if (z->wp >= zbd_zone_capacity_end(z))
+ return 0;
+
+ return zbd_zone_capacity_end(z) - z->wp;
+}
+
+/**
+ * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
+ * @f: file pointer.
+ * @z: zone info pointer.
+ * @required: minimum number of bytes that must remain in a zone.
+ *
+ * The caller must hold z->mutex.
+ */
+static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
+ uint64_t required)
+{
+ assert((required & 511) == 0);
+
+ return z->has_wp && required > zbd_zone_remainder(z);
+}
+
+static void zone_lock(struct thread_data *td, const struct fio_file *f,
+ struct fio_zone_info *z)
+{
+#ifndef NDEBUG
+ struct zoned_block_device_info *zbd = f->zbd_info;
+ uint32_t const nz = z - zbd->zone_info;
+ /* A thread should never lock zones outside its working area. */
+ assert(f->min_zone <= nz && nz < f->max_zone);
+ assert(z->has_wp);
+#endif
+
+ /*
+ * Lock the io_u target zone. The zone will be unlocked if io_u offset
+ * is changed or when io_u completes and zbd_put_io() executed.
+ * To avoid multiple jobs doing asynchronous I/Os from deadlocking each
+ * other waiting for zone locks when building an io_u batch, first
+ * only trylock the zone. If the zone is already locked by another job,
+ * process the currently queued I/Os so that I/O progress is made and
+ * zones unlocked.
+ */
+ if (pthread_mutex_trylock(&z->mutex) != 0) {
+ if (!td_ioengine_flagged(td, FIO_SYNCIO))
+ io_u_quiesce(td);
+ pthread_mutex_lock(&z->mutex);
+ }
+}
+
+static inline void zone_unlock(struct fio_zone_info *z)
+{
+ assert(z->has_wp);
+ pthread_mutex_unlock(&z->mutex);
+}
+
+static inline struct fio_zone_info *zbd_get_zone(const struct fio_file *f,
+ unsigned int zone_idx)
+{
+ return &f->zbd_info->zone_info[zone_idx];
+}
+
/* Look up the zone information entry that contains byte @offset of @f. */
static inline struct fio_zone_info *
zbd_offset_to_zone(const struct fio_file *f, uint64_t offset)
{
	const unsigned int zone_idx = zbd_offset_to_zone_idx(f, offset);

	return zbd_get_zone(f, zone_idx);
}
+
+static bool accounting_vdb(struct thread_data *td, const struct fio_file *f)
+{
+ return td->o.zrt.u.f && td_write(td);
+}
+