#include "pshared.h"
#include "zbd.h"
+static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
+{
+ return (uint64_t)(offset - f->file_offset) < f->io_size;
+}
+
+static inline unsigned int zbd_zone_idx(const struct fio_file *f,
+ struct fio_zone_info *zone)
+{
+ return zone - f->zbd_info->zone_info;
+}
+
+/**
+ * zbd_offset_to_zone_idx - convert an offset into a zone number
+ * @f: file pointer.
+ * @offset: offset in bytes. If this offset is in the first zone_size bytes
+ * past the disk size then the index of the sentinel is returned.
+ */
+static unsigned int zbd_offset_to_zone_idx(const struct fio_file *f,
+ uint64_t offset)
+{
+ uint32_t zone_idx;
+
+ if (f->zbd_info->zone_size_log2 > 0)
+ zone_idx = offset >> f->zbd_info->zone_size_log2;
+ else
+ zone_idx = offset / f->zbd_info->zone_size;
+
+ return min(zone_idx, f->zbd_info->nr_zones);
+}
+
+/**
+ * zbd_zone_end - Return zone end location
+ * @z: zone info pointer.
+ */
+static inline uint64_t zbd_zone_end(const struct fio_zone_info *z)
+{
+ return (z+1)->start;
+}
+
+/**
+ * zbd_zone_capacity_end - Return zone capacity limit end location
+ * @z: zone info pointer.
+ */
+static inline uint64_t zbd_zone_capacity_end(const struct fio_zone_info *z)
+{
+ return z->start + z->capacity;
+}
+
+/**
+ * zbd_zone_remainder - Return the number of bytes that are still available for
+ * writing before the zone gets full
+ * @z: zone info pointer.
+ */
+static inline uint64_t zbd_zone_remainder(struct fio_zone_info *z)
+{
+ if (z->wp >= zbd_zone_capacity_end(z))
+ return 0;
+
+ return zbd_zone_capacity_end(z) - z->wp;
+}
+
+/**
+ * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
+ * @f: file pointer.
+ * @z: zone info pointer.
+ * @required: minimum number of bytes that must remain in a zone.
+ *
+ * The caller must hold z->mutex.
+ */
+static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
+ uint64_t required)
+{
+ assert((required & 511) == 0);
+
+ return z->has_wp && required > zbd_zone_remainder(z);
+}
+
+static void zone_lock(struct thread_data *td, const struct fio_file *f,
+ struct fio_zone_info *z)
+{
+ struct zoned_block_device_info *zbd = f->zbd_info;
+ uint32_t nz = z - zbd->zone_info;
+
+ /* A thread should never lock zones outside its working area. */
+ assert(f->min_zone <= nz && nz < f->max_zone);
+
+ assert(z->has_wp);
+
+ /*
+ * Lock the io_u target zone. The zone will be unlocked if io_u offset
+ * is changed or when io_u completes and zbd_put_io() is executed.
+ * To prevent multiple jobs doing asynchronous I/Os from deadlocking each
+ * other while waiting for zone locks when building an io_u batch, first
+ * only trylock the zone. If the zone is already locked by another job,
+ * process the currently queued I/Os so that I/O progress is made and
+ * zones get unlocked.
+ */
+ if (pthread_mutex_trylock(&z->mutex) != 0) {
+ if (!td_ioengine_flagged(td, FIO_SYNCIO))
+ io_u_quiesce(td);
+ pthread_mutex_lock(&z->mutex);
+ }
+}
+
+static inline void zone_unlock(struct fio_zone_info *z)
+{
+ int ret;
+
+ assert(z->has_wp);
+ ret = pthread_mutex_unlock(&z->mutex);
+ assert(!ret);
+}
+
+static inline struct fio_zone_info *zbd_get_zone(const struct fio_file *f,
+ unsigned int zone_idx)
+{
+ return &f->zbd_info->zone_info[zone_idx];
+}
+
+static inline struct fio_zone_info *
+zbd_offset_to_zone(const struct fio_file *f, uint64_t offset)
+{
+ return zbd_get_zone(f, zbd_offset_to_zone_idx(f, offset));
+}
+
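+/*
+ * Whether valid data bytes accounting is needed, i.e. the job writes and the
+ * zone_reset_threshold option is set to a non-zero fraction.
+ */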
+static bool accounting_vdb(struct thread_data *td, const struct fio_file *f)
+{
+ return td->o.zrt.u.f && td_write(td);
+}
+
/**
* zbd_get_zoned_model - Get a device zoned model
* @td: FIO thread data
* @f: FIO file for which to get model information
*/
-int zbd_get_zoned_model(struct thread_data *td, struct fio_file *f,
- enum zbd_zoned_model *model)
+static int zbd_get_zoned_model(struct thread_data *td, struct fio_file *f,
+ enum zbd_zoned_model *model)
{
int ret;
+ if (f->filetype == FIO_TYPE_PIPE) {
+ log_err("zonemode=zbd does not support pipes\n");
+ return -EINVAL;
+ }
+
+ /* If regular file, always emulate zones inside the file. */
+ if (f->filetype == FIO_TYPE_FILE) {
+ *model = ZBD_NONE;
+ return 0;
+ }
+
if (td->io_ops && td->io_ops->get_zoned_model)
ret = td->io_ops->get_zoned_model(td, f, model);
else
* upon failure. If the zone report is empty, always assume an error (device
* problem) and return -EIO.
*/
-int zbd_report_zones(struct thread_data *td, struct fio_file *f,
- uint64_t offset, struct zbd_zone *zones,
- unsigned int nr_zones)
+static int zbd_report_zones(struct thread_data *td, struct fio_file *f,
+ uint64_t offset, struct zbd_zone *zones,
+ unsigned int nr_zones)
{
int ret;
ret = blkzoned_report_zones(td, f, offset, zones, nr_zones);
if (ret < 0) {
td_verror(td, errno, "report zones failed");
- log_err("%s: report zones from sector %llu failed (%d).\n",
- f->file_name, (unsigned long long)offset >> 9, errno);
+ log_err("%s: report zones from sector %"PRIu64" failed (nr_zones=%d; errno=%d).\n",
+ f->file_name, offset >> 9, nr_zones, errno);
} else if (ret == 0) {
td_verror(td, errno, "Empty zone report");
- log_err("%s: report zones from sector %llu is empty.\n",
- f->file_name, (unsigned long long)offset >> 9);
+ log_err("%s: report zones from sector %"PRIu64" is empty.\n",
+ f->file_name, offset >> 9);
ret = -EIO;
}
* Reset the write pointer of all zones in the range @offset...@offset+@length.
* Returns 0 upon success and a negative error code upon failure.
*/
-int zbd_reset_wp(struct thread_data *td, struct fio_file *f,
- uint64_t offset, uint64_t length)
+static int zbd_reset_wp(struct thread_data *td, struct fio_file *f,
+ uint64_t offset, uint64_t length)
{
int ret;
ret = blkzoned_reset_wp(td, f, offset, length);
if (ret < 0) {
td_verror(td, errno, "resetting wp failed");
- log_err("%s: resetting wp for %llu sectors at sector %llu failed (%d).\n",
- f->file_name, (unsigned long long)length >> 9,
- (unsigned long long)offset >> 9, errno);
+ log_err("%s: resetting wp for %"PRIu64" sectors at sector %"PRIu64" failed (%d).\n",
+ f->file_name, length >> 9, offset >> 9, errno);
}
return ret;
}
/**
- * zbd_zone_idx - convert an offset into a zone number
- * @f: file pointer.
- * @offset: offset in bytes. If this offset is in the first zone_size bytes
- * past the disk size then the index of the sentinel is returned.
+ * zbd_reset_zone - reset the write pointer of a single zone
+ * @td: FIO thread data.
+ * @f: FIO file associated with the disk for which to reset a write pointer.
+ * @z: Zone to reset.
+ *
+ * Returns 0 upon success and a negative error code upon failure.
+ *
+ * The caller must hold z->mutex.
*/
-static uint32_t zbd_zone_idx(const struct fio_file *f, uint64_t offset)
+static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
+ struct fio_zone_info *z)
{
- uint32_t zone_idx;
+ uint64_t offset = z->start;
+ uint64_t length = (z+1)->start - offset;
+ uint64_t data_in_zone = z->wp - z->start;
+ int ret = 0;
- if (f->zbd_info->zone_size_log2 > 0)
- zone_idx = offset >> f->zbd_info->zone_size_log2;
- else
- zone_idx = offset / f->zbd_info->zone_size;
+ if (!data_in_zone)
+ return 0;
- return min(zone_idx, f->zbd_info->nr_zones);
+ assert(is_valid_offset(f, offset + length - 1));
+
+ dprint(FD_ZBD, "%s: resetting wp of zone %u.\n",
+ f->file_name, zbd_zone_idx(f, z));
+
+ switch (f->zbd_info->model) {
+ case ZBD_HOST_AWARE:
+ case ZBD_HOST_MANAGED:
+ ret = zbd_reset_wp(td, f, offset, length);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
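+	/* Subtract the reset zone's valid data from the accounted total. */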
+ if (accounting_vdb(td, f)) {
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ f->zbd_info->wp_valid_data_bytes -= data_in_zone;
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+ }
+
+ z->wp = z->start;
+
+ td->ts.nr_zone_resets++;
+
+ return ret;
}
/**
- * zbd_zone_swr - Test whether a zone requires sequential writes
- * @z: zone info pointer.
+ * zbd_close_zone - Remove a zone from the open zones array.
+ * @td: FIO thread data.
+ * @f: FIO file for which to close a zone.
+ * @z: Zone to remove.
+ *
+ * The caller must hold f->zbd_info->mutex.
*/
-static inline bool zbd_zone_swr(struct fio_zone_info *z)
+static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
+ struct fio_zone_info *z)
{
- return z->type == ZBD_ZONE_TYPE_SWR;
+ uint32_t ozi;
+
+ if (!z->open)
+ return;
+
+ for (ozi = 0; ozi < f->zbd_info->num_open_zones; ozi++) {
+ if (zbd_get_zone(f, f->zbd_info->open_zones[ozi]) == z)
+ break;
+ }
+ if (ozi == f->zbd_info->num_open_zones)
+ return;
+
+ dprint(FD_ZBD, "%s: closing zone %u\n",
+ f->file_name, zbd_zone_idx(f, z));
+
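+	/* Remove the zone by shifting the remaining array entries down. */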
+ memmove(f->zbd_info->open_zones + ozi,
+ f->zbd_info->open_zones + ozi + 1,
+ (ZBD_MAX_OPEN_ZONES - (ozi + 1)) *
+ sizeof(f->zbd_info->open_zones[0]));
+
+ f->zbd_info->num_open_zones--;
+ td->num_open_zones--;
+ z->open = 0;
}
/**
- * zbd_zone_end - Return zone end location
- * @z: zone info pointer.
+ * zbd_finish_zone - finish the specified zone
+ * @td: FIO thread data.
+ * @f: FIO file for which to finish a zone
+ * @z: Zone to finish.
+ *
+ * Finish the zone @z, i.e. transition it from an open or closed condition to
+ * the full condition.
*/
-static inline uint64_t zbd_zone_end(const struct fio_zone_info *z)
+static int zbd_finish_zone(struct thread_data *td, struct fio_file *f,
+ struct fio_zone_info *z)
{
- return (z+1)->start;
+ uint64_t offset = z->start;
+ uint64_t length = f->zbd_info->zone_size;
+ int ret = 0;
+
+ switch (f->zbd_info->model) {
+ case ZBD_HOST_AWARE:
+ case ZBD_HOST_MANAGED:
+ if (td->io_ops && td->io_ops->finish_zone)
+ ret = td->io_ops->finish_zone(td, f, offset, length);
+ else
+ ret = blkzoned_finish_zone(td, f, offset, length);
+ break;
+ default:
+ break;
+ }
+
+ if (ret < 0) {
+ td_verror(td, errno, "finish zone failed");
+ log_err("%s: finish zone at sector %"PRIu64" failed (%d).\n",
+ f->file_name, offset >> 9, errno);
+ } else {
+ z->wp = (z+1)->start;
+ }
+
+ return ret;
}
/**
- * zbd_zone_capacity_end - Return zone capacity limit end location
- * @z: zone info pointer.
+ * zbd_reset_zones - Reset a range of zones.
+ * @td: fio thread data.
+ * @f: fio file for which to reset zones
+ * @zb: first zone to reset.
+ * @ze: first zone not to reset.
+ *
+ * Returns 0 upon success and 1 upon failure.
*/
-static inline uint64_t zbd_zone_capacity_end(const struct fio_zone_info *z)
+static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
+ struct fio_zone_info *const zb,
+ struct fio_zone_info *const ze)
{
- return z->start + z->capacity;
+ struct fio_zone_info *z;
+ const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
+ int res = 0;
+
+ assert(min_bs);
+
+ dprint(FD_ZBD, "%s: examining zones %u .. %u\n",
+ f->file_name, zbd_zone_idx(f, zb), zbd_zone_idx(f, ze));
+
+ for (z = zb; z < ze; z++) {
+ if (!z->has_wp)
+ continue;
+
+ zone_lock(td, f, z);
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ zbd_close_zone(td, f, z);
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+
+ if (z->wp != z->start) {
+ dprint(FD_ZBD, "%s: resetting zone %u\n",
+ f->file_name, zbd_zone_idx(f, z));
+ if (zbd_reset_zone(td, f, z) < 0)
+ res = 1;
+ }
+
+ zone_unlock(z);
+ }
+
+ return res;
}
/**
- * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
- * @f: file pointer.
- * @z: zone info pointer.
- * @required: minimum number of bytes that must remain in a zone.
+ * zbd_get_max_open_zones - Get the maximum number of open zones
+ * @td: FIO thread data
+ * @f: FIO file for which to get max open zones
+ * @max_open_zones: Upon success, result will be stored here.
*
- * The caller must hold z->mutex.
+ * A @max_open_zones value set to zero means no limit.
+ *
+ * Returns 0 upon success and a negative error code upon failure.
*/
-static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
- uint64_t required)
+static int zbd_get_max_open_zones(struct thread_data *td, struct fio_file *f,
+ unsigned int *max_open_zones)
{
- assert((required & 511) == 0);
+ int ret;
+
+ if (td->io_ops && td->io_ops->get_max_open_zones)
+ ret = td->io_ops->get_max_open_zones(td, f, max_open_zones);
+ else
+ ret = blkzoned_get_max_open_zones(td, f, max_open_zones);
+ if (ret < 0) {
+ td_verror(td, errno, "get max open zones failed");
+ log_err("%s: get max open zones failed (%d).\n",
+ f->file_name, errno);
+ }
- return zbd_zone_swr(z) &&
- z->wp + required > zbd_zone_capacity_end(z);
+ return ret;
}
-static void zone_lock(struct thread_data *td, struct fio_file *f, struct fio_zone_info *z)
+/**
+ * zbd_open_zone - Add a zone to the array of open zones.
+ * @td: fio thread data.
+ * @f: fio file that owns the array of open zones.
+ * @z: Zone to add.
+ *
+ * Open a ZBD zone if it is not already open. Returns true if either the zone
+ * was already open or if the zone was successfully added to the array of open
+ * zones without exceeding the maximum number of open zones. Returns false if
+ * the zone was not already open and opening the zone would cause the zone limit
+ * to be exceeded.
+ */
+static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
+ struct fio_zone_info *z)
{
- struct zoned_block_device_info *zbd = f->zbd_info;
- uint32_t nz = z - zbd->zone_info;
+ const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
+ struct zoned_block_device_info *zbdi = f->zbd_info;
+ uint32_t zone_idx = zbd_zone_idx(f, z);
+ bool res = true;
- /* A thread should never lock zones outside its working area. */
- assert(f->min_zone <= nz && nz < f->max_zone);
+ if (z->cond == ZBD_ZONE_COND_OFFLINE)
+ return false;
/*
- * Lock the io_u target zone. The zone will be unlocked if io_u offset
- * is changed or when io_u completes and zbd_put_io() executed.
- * To avoid multiple jobs doing asynchronous I/Os from deadlocking each
- * other waiting for zone locks when building an io_u batch, first
- * only trylock the zone. If the zone is already locked by another job,
- * process the currently queued I/Os so that I/O progress is made and
- * zones unlocked.
+ * Skip full zones with data verification enabled because resetting a
+ * zone causes data loss and hence causes verification to fail.
*/
- if (pthread_mutex_trylock(&z->mutex) != 0) {
- if (!td_ioengine_flagged(td, FIO_SYNCIO))
- io_u_quiesce(td);
- pthread_mutex_lock(&z->mutex);
+ if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
+ return false;
+
+ /*
+ * zbdi->max_open_zones == 0 means that there is no limit on the maximum
+ * number of open zones. In this case, do not track open zones in the
+ * zbdi->open_zones array.
+ */
+ if (!zbdi->max_open_zones)
+ return true;
+
+ pthread_mutex_lock(&zbdi->mutex);
+
+ if (z->open) {
+ /*
+ * If the zone is going to be completely filled by writes
+ * already in-flight, handle it as a full zone instead of an
+ * open zone.
+ */
+ if (!zbd_zone_remainder(z))
+ res = false;
+ goto out;
}
-}
-static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
-{
- return (uint64_t)(offset - f->file_offset) < f->io_size;
+ res = false;
+ /* Zero means no limit */
+ if (td->o.job_max_open_zones > 0 &&
+ td->num_open_zones >= td->o.job_max_open_zones)
+ goto out;
+ if (zbdi->num_open_zones >= zbdi->max_open_zones)
+ goto out;
+
+ dprint(FD_ZBD, "%s: opening zone %u\n",
+ f->file_name, zone_idx);
+
+ zbdi->open_zones[zbdi->num_open_zones++] = zone_idx;
+ td->num_open_zones++;
+ z->open = 1;
+ res = true;
+
+out:
+ pthread_mutex_unlock(&zbdi->mutex);
+ return res;
}
-/* Verify whether direct I/O is used for all host-managed zoned drives. */
+/* Verify whether direct I/O is used for all host-managed zoned block drives. */
static bool zbd_using_direct_io(void)
{
- struct thread_data *td;
struct fio_file *f;
- int i, j;
+ int j;
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
continue;
for_each_file(td, f, j) {
- if (f->zbd_info &&
+ if (f->zbd_info && f->filetype == FIO_TYPE_BLOCK &&
f->zbd_info->model == ZBD_HOST_MANAGED)
return false;
}
- }
+ } end_for_each();
return true;
}
/* Whether or not the I/O range for f includes one or more sequential zones */
-static bool zbd_is_seq_job(struct fio_file *f)
+static bool zbd_is_seq_job(const struct fio_file *f)
{
uint32_t zone_idx, zone_idx_b, zone_idx_e;
assert(f->zbd_info);
+
if (f->io_size == 0)
return false;
- zone_idx_b = zbd_zone_idx(f, f->file_offset);
- zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size - 1);
+
+ zone_idx_b = zbd_offset_to_zone_idx(f, f->file_offset);
+ zone_idx_e =
+ zbd_offset_to_zone_idx(f, f->file_offset + f->io_size - 1);
for (zone_idx = zone_idx_b; zone_idx <= zone_idx_e; zone_idx++)
- if (zbd_zone_swr(&f->zbd_info->zone_info[zone_idx]))
+ if (zbd_get_zone(f, zone_idx)->has_wp)
return true;
return false;
}
/*
- * Verify whether offset and size parameters are aligned with zone boundaries.
+ * Verify whether the file offset and size parameters are aligned with zone
+ * boundaries. If the file offset is not aligned, round it up to the start of
+ * the next zone and round down the io_size so that the I/O range ends on a
+ * zone boundary.
*/
-static bool zbd_verify_sizes(void)
+static bool zbd_zone_align_file_sizes(struct thread_data *td,
+ struct fio_file *f)
{
const struct fio_zone_info *z;
- struct thread_data *td;
- struct fio_file *f;
uint64_t new_offset, new_end;
- uint32_t zone_idx;
- int i, j;
- for_each_td(td, i) {
- for_each_file(td, f, j) {
- if (!f->zbd_info)
- continue;
- if (f->file_offset >= f->real_file_size)
- continue;
- if (!zbd_is_seq_job(f))
- continue;
+ if (!f->zbd_info)
+ return true;
+ if (f->file_offset >= f->real_file_size)
+ return true;
+ if (!zbd_is_seq_job(f))
+ return true;
- if (!td->o.zone_size) {
- td->o.zone_size = f->zbd_info->zone_size;
- if (!td->o.zone_size) {
- log_err("%s: invalid 0 zone size\n",
- f->file_name);
- return false;
- }
- } else if (td->o.zone_size != f->zbd_info->zone_size) {
- log_err("%s: job parameter zonesize %llu does not match disk zone size %llu.\n",
- f->file_name, (unsigned long long) td->o.zone_size,
- (unsigned long long) f->zbd_info->zone_size);
- return false;
- }
+ if (!td->o.zone_size) {
+ td->o.zone_size = f->zbd_info->zone_size;
+ if (!td->o.zone_size) {
+ log_err("%s: invalid 0 zone size\n",
+ f->file_name);
+ return false;
+ }
+ } else if (td->o.zone_size != f->zbd_info->zone_size) {
+ log_err("%s: zonesize %llu does not match the device zone size %"PRIu64".\n",
+ f->file_name, td->o.zone_size,
+ f->zbd_info->zone_size);
+ return false;
+ }
- if (td->o.zone_skip &&
- (td->o.zone_skip < td->o.zone_size ||
- td->o.zone_skip % td->o.zone_size)) {
- log_err("%s: zoneskip %llu is not a multiple of the device zone size %llu.\n",
- f->file_name, (unsigned long long) td->o.zone_skip,
- (unsigned long long) td->o.zone_size);
- return false;
- }
+ if (td->o.zone_skip % td->o.zone_size) {
+ log_err("%s: zoneskip %llu is not a multiple of the device zone size %llu.\n",
+ f->file_name, td->o.zone_skip,
+ td->o.zone_size);
+ return false;
+ }
- zone_idx = zbd_zone_idx(f, f->file_offset);
- z = &f->zbd_info->zone_info[zone_idx];
- if ((f->file_offset != z->start) &&
- (td->o.td_ddir != TD_DDIR_READ)) {
- new_offset = zbd_zone_end(z);
- if (new_offset >= f->file_offset + f->io_size) {
- log_info("%s: io_size must be at least one zone\n",
- f->file_name);
- return false;
- }
- log_info("%s: rounded up offset from %llu to %llu\n",
- f->file_name, (unsigned long long) f->file_offset,
- (unsigned long long) new_offset);
- f->io_size -= (new_offset - f->file_offset);
- f->file_offset = new_offset;
- }
- zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
- z = &f->zbd_info->zone_info[zone_idx];
- new_end = z->start;
- if ((td->o.td_ddir != TD_DDIR_READ) &&
- (f->file_offset + f->io_size != new_end)) {
- if (new_end <= f->file_offset) {
- log_info("%s: io_size must be at least one zone\n",
- f->file_name);
- return false;
- }
- log_info("%s: rounded down io_size from %llu to %llu\n",
- f->file_name, (unsigned long long) f->io_size,
- (unsigned long long) new_end - f->file_offset);
- f->io_size = new_end - f->file_offset;
- }
+ z = zbd_offset_to_zone(f, f->file_offset);
+ if ((f->file_offset != z->start) &&
+ (td->o.td_ddir != TD_DDIR_READ)) {
+ new_offset = zbd_zone_end(z);
+ if (new_offset >= f->file_offset + f->io_size) {
+ log_info("%s: io_size must be at least one zone\n",
+ f->file_name);
+ return false;
+ }
+ log_info("%s: rounded up offset from %"PRIu64" to %"PRIu64"\n",
+ f->file_name, f->file_offset,
+ new_offset);
+ f->io_size -= (new_offset - f->file_offset);
+ f->file_offset = new_offset;
+ }
- f->min_zone = zbd_zone_idx(f, f->file_offset);
- f->max_zone = zbd_zone_idx(f, f->file_offset + f->io_size);
- assert(f->min_zone < f->max_zone);
+ z = zbd_offset_to_zone(f, f->file_offset + f->io_size);
+ new_end = z->start;
+ if ((td->o.td_ddir != TD_DDIR_READ) &&
+ (f->file_offset + f->io_size != new_end)) {
+ if (new_end <= f->file_offset) {
+ log_info("%s: io_size must be at least one zone\n",
+ f->file_name);
+ return false;
}
+ log_info("%s: rounded down io_size from %"PRIu64" to %"PRIu64"\n",
+ f->file_name, f->io_size,
+ new_end - f->file_offset);
+ f->io_size = new_end - f->file_offset;
}
return true;
}
-static bool zbd_verify_bs(void)
+/*
+ * Verify whether offset and size parameters are aligned with zone boundaries.
+ */
+static bool zbd_verify_sizes(void)
{
- struct thread_data *td;
struct fio_file *f;
- uint32_t zone_size;
- int i, j, k;
+ int j;
+
+ for_each_td(td) {
+ for_each_file(td, f, j) {
+ if (!zbd_zone_align_file_sizes(td, f))
+ return false;
+ }
+ } end_for_each();
- for_each_td(td, i) {
+ return true;
+}
+
+static bool zbd_verify_bs(void)
+{
+ struct fio_file *f;
+ int j;
+
+ for_each_td(td) {
+ if (td_trim(td) &&
+ (td->o.min_bs[DDIR_TRIM] != td->o.max_bs[DDIR_TRIM] ||
+ td->o.bssplit_nr[DDIR_TRIM])) {
+ log_info("bsrange and bssplit are not allowed for trim with zonemode=zbd\n");
+ return false;
+ }
for_each_file(td, f, j) {
+ uint64_t zone_size;
+
if (!f->zbd_info)
continue;
+
zone_size = f->zbd_info->zone_size;
- for (k = 0; k < FIO_ARRAY_SIZE(td->o.bs); k++) {
- if (td->o.verify != VERIFY_NONE &&
- zone_size % td->o.bs[k] != 0) {
- log_info("%s: block size %llu is not a divisor of the zone size %d\n",
- f->file_name, td->o.bs[k],
- zone_size);
- return false;
- }
+ if (td_trim(td) && td->o.bs[DDIR_TRIM] != zone_size) {
+ log_info("%s: trim block size %llu is not the zone size %"PRIu64"\n",
+ f->file_name, td->o.bs[DDIR_TRIM],
+ zone_size);
+ return false;
}
}
- }
+ } end_for_each();
return true;
}
int i;
if (zone_size == 0) {
- log_err("%s: Specifying the zone size is mandatory for regular block devices with --zonemode=zbd\n\n",
+ log_err("%s: Specifying the zone size is mandatory for regular file/block device with --zonemode=zbd\n\n",
f->file_name);
return 1;
}
if (zone_capacity > zone_size) {
log_err("%s: job parameter zonecapacity %llu is larger than zone size %llu\n",
- f->file_name, (unsigned long long) td->o.zone_capacity,
- (unsigned long long) td->o.zone_size);
+ f->file_name, td->o.zone_capacity, td->o.zone_size);
return 1;
}
+ if (f->real_file_size < zone_size) {
+ log_err("%s: file/device size %"PRIu64" is smaller than zone size %"PRIu64"\n",
+ f->file_name, f->real_file_size, zone_size);
+ return -EINVAL;
+ }
+
nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
zbd_info = scalloc(1, sizeof(*zbd_info) +
(nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
p->type = ZBD_ZONE_TYPE_SWR;
p->cond = ZBD_ZONE_COND_EMPTY;
p->capacity = zone_capacity;
+ p->has_wp = 1;
}
/* a sentinel */
p->start = nr_zones * zone_size;
int nr_zones, nrz;
struct zbd_zone *zones, *z;
struct fio_zone_info *p;
- uint64_t zone_size, offset;
+ uint64_t zone_size, offset, capacity;
+ bool same_zone_cap = true;
struct zoned_block_device_info *zbd_info = NULL;
int i, j, ret = -ENOMEM;
}
zone_size = zones[0].len;
+ capacity = zones[0].capacity;
nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
if (td->o.zone_size == 0) {
td->o.zone_size = zone_size;
} else if (td->o.zone_size != zone_size) {
- log_err("fio: %s job parameter zonesize %llu does not match disk zone size %llu.\n",
- f->file_name, (unsigned long long) td->o.zone_size,
- (unsigned long long) zone_size);
+ log_err("fio: %s job parameter zonesize %llu does not match disk zone size %"PRIu64".\n",
+ f->file_name, td->o.zone_size, zone_size);
ret = -EINVAL;
goto out;
}
- dprint(FD_ZBD, "Device %s has %d zones of size %llu KB\n", f->file_name,
- nr_zones, (unsigned long long) zone_size / 1024);
+ dprint(FD_ZBD, "Device %s has %d zones of size %"PRIu64" KB\n",
+ f->file_name, nr_zones, zone_size / 1024);
zbd_info = scalloc(1, sizeof(*zbd_info) +
(nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
PTHREAD_MUTEX_RECURSIVE);
p->start = z->start;
p->capacity = z->capacity;
+ if (capacity != z->capacity)
+ same_zone_cap = false;
+
switch (z->cond) {
case ZBD_ZONE_COND_NOT_WP:
case ZBD_ZONE_COND_FULL:
p->wp = z->wp;
break;
}
+
+ switch (z->type) {
+ case ZBD_ZONE_TYPE_SWR:
+ p->has_wp = 1;
+ break;
+ default:
+ p->has_wp = 0;
+ }
p->type = z->type;
p->cond = z->cond;
+
if (j > 0 && p->start != p[-1].start + zone_size) {
- log_info("%s: invalid zone data\n",
- f->file_name);
+ log_info("%s: invalid zone data [%d:%d]: %"PRIu64" + %"PRIu64" != %"PRIu64"\n",
+ f->file_name, j, i,
+ p[-1].start, zone_size, p->start);
ret = -EINVAL;
goto out;
}
offset = z->start + z->len;
if (j >= nr_zones)
break;
- nrz = zbd_report_zones(td, f, offset,
- zones, ZBD_REPORT_MAX_ZONES);
+
+ nrz = zbd_report_zones(td, f, offset, zones,
+ min((uint32_t)(nr_zones - j),
+ ZBD_REPORT_MAX_ZONES));
if (nrz < 0) {
ret = nrz;
- log_info("fio: report zones (offset %llu) failed for %s (%d).\n",
- (unsigned long long)offset,
- f->file_name, -ret);
+ log_info("fio: report zones (offset %"PRIu64") failed for %s (%d).\n",
+ offset, f->file_name, -ret);
goto out;
}
}
f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
ilog2(zone_size) : 0;
f->zbd_info->nr_zones = nr_zones;
+
+ if (same_zone_cap)
+ dprint(FD_ZBD, "Zone capacity = %"PRIu64" KB\n",
+ capacity / 1024);
+
zbd_info = NULL;
ret = 0;
return ret;
}
+static int zbd_set_max_open_zones(struct thread_data *td, struct fio_file *f)
+{
+ struct zoned_block_device_info *zbd = f->zbd_info;
+ unsigned int max_open_zones;
+ int ret;
+
+ if (zbd->model != ZBD_HOST_MANAGED || td->o.ignore_zone_limits) {
+ /* Only host-managed devices have a max open limit */
+ zbd->max_open_zones = td->o.max_open_zones;
+ goto out;
+ }
+
+ /* If host-managed, get the max open limit */
+ ret = zbd_get_max_open_zones(td, f, &max_open_zones);
+ if (ret)
+ return ret;
+
+ if (!max_open_zones) {
+ /* No device limit */
+ zbd->max_open_zones = td->o.max_open_zones;
+ } else if (!td->o.max_open_zones) {
+ /* No user limit. Set limit to device limit */
+ zbd->max_open_zones = max_open_zones;
+ } else if (td->o.max_open_zones <= max_open_zones) {
+ /* Both user limit and dev limit. User limit not too large */
+ zbd->max_open_zones = td->o.max_open_zones;
+ } else {
+ /* Both user limit and dev limit. User limit too large */
+ td_verror(td, EINVAL,
+ "Specified --max_open_zones is too large");
+ log_err("Specified --max_open_zones (%d) is larger than max (%u)\n",
+ td->o.max_open_zones, max_open_zones);
+ return -EINVAL;
+ }
+
+out:
+ /* Ensure that the limit is not larger than FIO's internal limit */
+ if (zbd->max_open_zones > ZBD_MAX_OPEN_ZONES) {
+ td_verror(td, EINVAL, "'max_open_zones' value is too large");
+ log_err("'max_open_zones' value is larger than %u\n",
+ ZBD_MAX_OPEN_ZONES);
+ return -EINVAL;
+ }
+
+ dprint(FD_ZBD, "%s: using max open zones limit: %"PRIu32"\n",
+ f->file_name, zbd->max_open_zones);
+
+ return 0;
+}
+
/*
* Allocate zone information and store it into f->zbd_info if zonemode=zbd.
*
return ret;
switch (zbd_model) {
- case ZBD_IGNORE:
- return 0;
case ZBD_HOST_AWARE:
case ZBD_HOST_MANAGED:
ret = parse_zone_info(td, f);
+ if (ret)
+ return ret;
break;
case ZBD_NONE:
ret = init_zone_info(td, f);
+ if (ret)
+ return ret;
break;
default:
td_verror(td, EINVAL, "Unsupported zoned model");
return -EINVAL;
}
- if (ret == 0) {
- f->zbd_info->model = zbd_model;
- f->zbd_info->max_open_zones = td->o.max_open_zones;
+ assert(f->zbd_info);
+ f->zbd_info->model = zbd_model;
+
+ ret = zbd_set_max_open_zones(td, f);
+ if (ret) {
+ zbd_free_zone_info(f);
+ return ret;
}
- return ret;
+
+ return 0;
}
void zbd_free_zone_info(struct fio_file *f)
*/
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
- struct thread_data *td2;
struct fio_file *f2;
- int i, j, ret;
+ int j, ret;
- for_each_td(td2, i) {
+ for_each_td(td2) {
for_each_file(td2, f2, j) {
if (td2 == td && f2 == file)
continue;
file->zbd_info->refcount++;
return 0;
}
- }
+ } end_for_each();
ret = zbd_create_zone_info(td, file);
if (ret < 0)
td_verror(td, -ret, "zbd_create_zone_info() failed");
+
return ret;
}
-static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
- uint32_t zone_idx);
-static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
- struct fio_zone_info *z);
-
-int zbd_setup_files(struct thread_data *td)
+int zbd_init_files(struct thread_data *td)
{
struct fio_file *f;
int i;
return 1;
}
- if (!zbd_using_direct_io()) {
- log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
- return 1;
- }
-
- if (!zbd_verify_sizes())
- return 1;
+ return 0;
+}
- if (!zbd_verify_bs())
- return 1;
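+/*
+ * Convert the job options that were specified in units of zones (size_nz,
+ * io_size_nz, start_offset_nz, offset_increment_nz and zone_skip_nz) into
+ * byte values now that the zone size is known.
+ */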
+void zbd_recalc_options_with_zone_granularity(struct thread_data *td)
+{
+ struct fio_file *f;
+ int i;
for_each_file(td, f, i) {
struct zoned_block_device_info *zbd = f->zbd_info;
- struct fio_zone_info *z;
- int zi;
+ uint64_t zone_size;
- if (!zbd)
+ /* zonemode=strided doesn't get per-file zone size. */
+ zone_size = zbd ? zbd->zone_size : td->o.zone_size;
+ if (zone_size == 0)
continue;
- zbd->max_open_zones = zbd->max_open_zones ?: ZBD_MAX_OPEN_ZONES;
-
- if (td->o.max_open_zones > 0 &&
- zbd->max_open_zones != td->o.max_open_zones) {
- log_err("Different 'max_open_zones' values\n");
- return 1;
- }
- if (zbd->max_open_zones > ZBD_MAX_OPEN_ZONES) {
- log_err("'max_open_zones' value is limited by %u\n", ZBD_MAX_OPEN_ZONES);
- return 1;
- }
-
- for (zi = f->min_zone; zi < f->max_zone; zi++) {
- z = &zbd->zone_info[zi];
- if (z->cond != ZBD_ZONE_COND_IMP_OPEN &&
- z->cond != ZBD_ZONE_COND_EXP_OPEN)
- continue;
- if (zbd_open_zone(td, f, zi))
- continue;
- /*
- * If the number of open zones exceeds specified limits,
- * reset all extra open zones.
- */
- if (zbd_reset_zone(td, f, z) < 0) {
- log_err("Failed to reest zone %d\n", zi);
- return 1;
- }
- }
+ if (td->o.size_nz > 0)
+ td->o.size = td->o.size_nz * zone_size;
+ if (td->o.io_size_nz > 0)
+ td->o.io_size = td->o.io_size_nz * zone_size;
+ if (td->o.start_offset_nz > 0)
+ td->o.start_offset = td->o.start_offset_nz * zone_size;
+ if (td->o.offset_increment_nz > 0)
+ td->o.offset_increment =
+ td->o.offset_increment_nz * zone_size;
+ if (td->o.zone_skip_nz > 0)
+ td->o.zone_skip = td->o.zone_skip_nz * zone_size;
}
-
- return 0;
}
-static inline unsigned int zbd_zone_nr(const struct fio_file *f,
- struct fio_zone_info *zone)
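+/*
+ * Verify that all jobs using zone_reset_threshold write to the same zone
+ * range and initialize the count of valid data bytes (bytes below the write
+ * pointers) in that range.
+ */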
+static uint64_t zbd_verify_and_set_vdb(struct thread_data *td,
+ const struct fio_file *f)
{
- return zone - f->zbd_info->zone_info;
-}
+ struct fio_zone_info *zb, *ze, *z;
+ uint64_t wp_vdb = 0;
+ struct zoned_block_device_info *zbdi = f->zbd_info;
-/**
- * zbd_reset_zone - reset the write pointer of a single zone
- * @td: FIO thread data.
- * @f: FIO file associated with the disk for which to reset a write pointer.
- * @z: Zone to reset.
- *
- * Returns 0 upon success and a negative error code upon failure.
- *
- * The caller must hold z->mutex.
- */
-static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
- struct fio_zone_info *z)
-{
- uint64_t offset = z->start;
- uint64_t length = (z+1)->start - offset;
- int ret = 0;
+ assert(td->runstate < TD_RUNNING);
+ assert(zbdi);
- if (z->wp == z->start)
+ if (!accounting_vdb(td, f))
return 0;
- assert(is_valid_offset(f, offset + length - 1));
+ /*
+ * Ensure that the I/O range includes one or more sequential zones so
+ * that f->min_zone and f->max_zone have different values.
+ */
+ if (!zbd_is_seq_job(f))
+ return 0;
- dprint(FD_ZBD, "%s: resetting wp of zone %u.\n", f->file_name,
- zbd_zone_nr(f, z));
- switch (f->zbd_info->model) {
- case ZBD_HOST_AWARE:
- case ZBD_HOST_MANAGED:
- ret = zbd_reset_wp(td, f, offset, length);
- if (ret < 0)
- return ret;
- break;
- default:
- break;
+ if (zbdi->write_min_zone != zbdi->write_max_zone) {
+ if (zbdi->write_min_zone != f->min_zone ||
+ zbdi->write_max_zone != f->max_zone) {
+ td_verror(td, EINVAL,
+ "multi-jobs with different write ranges are "
+ "not supported with zone_reset_threshold");
+ log_err("multi-jobs with different write ranges are "
+ "not supported with zone_reset_threshold\n");
+ }
+ return 0;
}
- pthread_mutex_lock(&f->zbd_info->mutex);
- f->zbd_info->sectors_with_data -= z->wp - z->start;
- pthread_mutex_unlock(&f->zbd_info->mutex);
- z->wp = z->start;
- z->verify_block = 0;
+ zbdi->write_min_zone = f->min_zone;
+ zbdi->write_max_zone = f->max_zone;
- td->ts.nr_zone_resets++;
+ zb = zbd_get_zone(f, f->min_zone);
+ ze = zbd_get_zone(f, f->max_zone);
+ for (z = zb; z < ze; z++)
+ if (z->has_wp)
+ wp_vdb += z->wp - z->start;
- return ret;
+ zbdi->wp_valid_data_bytes = wp_vdb;
+
+ return wp_vdb;
}
-/* The caller must hold f->zbd_info->mutex */
-static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
- unsigned int zone_idx)
+int zbd_setup_files(struct thread_data *td)
{
- uint32_t open_zone_idx = 0;
+ struct fio_file *f;
+ int i;
- for (; open_zone_idx < f->zbd_info->num_open_zones; open_zone_idx++) {
- if (f->zbd_info->open_zones[open_zone_idx] == zone_idx)
- break;
+ if (!zbd_using_direct_io()) {
+ log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
+ return 1;
}
- if (open_zone_idx == f->zbd_info->num_open_zones) {
- dprint(FD_ZBD, "%s: zone %d is not open\n",
- f->file_name, zone_idx);
- return;
+
+ if (!zbd_verify_sizes())
+ return 1;
+
+ if (!zbd_verify_bs())
+ return 1;
+
+ if (td->o.experimental_verify) {
+ log_err("zonemode=zbd does not support experimental verify\n");
+ return 1;
}
- dprint(FD_ZBD, "%s: closing zone %d\n", f->file_name, zone_idx);
- memmove(f->zbd_info->open_zones + open_zone_idx,
- f->zbd_info->open_zones + open_zone_idx + 1,
- (ZBD_MAX_OPEN_ZONES - (open_zone_idx + 1)) *
- sizeof(f->zbd_info->open_zones[0]));
- f->zbd_info->num_open_zones--;
- td->num_open_zones--;
- f->zbd_info->zone_info[zone_idx].open = 0;
-}
+ for_each_file(td, f, i) {
+ struct zoned_block_device_info *zbd = f->zbd_info;
+ struct fio_zone_info *z;
+ int zi;
+ uint64_t vdb;
-/*
- * Reset a range of zones. Returns 0 upon success and 1 upon failure.
- * @td: fio thread data.
- * @f: fio file for which to reset zones
- * @zb: first zone to reset.
- * @ze: first zone not to reset.
- * @all_zones: whether to reset all zones or only those zones for which the
- * write pointer is not a multiple of td->o.min_bs[DDIR_WRITE].
- */
-static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
- struct fio_zone_info *const zb,
- struct fio_zone_info *const ze, bool all_zones)
-{
- struct fio_zone_info *z;
- const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
- bool reset_wp;
- int res = 0;
+ assert(zbd);
- assert(min_bs);
+ f->min_zone = zbd_offset_to_zone_idx(f, f->file_offset);
+ f->max_zone =
+ zbd_offset_to_zone_idx(f, f->file_offset + f->io_size);
- dprint(FD_ZBD, "%s: examining zones %u .. %u\n", f->file_name,
- zbd_zone_nr(f, zb), zbd_zone_nr(f, ze));
- for (z = zb; z < ze; z++) {
- uint32_t nz = zbd_zone_nr(f, z);
+ vdb = zbd_verify_and_set_vdb(td, f);
- if (!zbd_zone_swr(z))
- continue;
- zone_lock(td, f, z);
- if (all_zones) {
- pthread_mutex_lock(&f->zbd_info->mutex);
- zbd_close_zone(td, f, nz);
- pthread_mutex_unlock(&f->zbd_info->mutex);
+ dprint(FD_ZBD, "%s(%s): valid data bytes = %" PRIu64 "\n",
+ __func__, f->file_name, vdb);
- reset_wp = z->wp != z->start;
- } else {
- reset_wp = z->wp % min_bs != 0;
+ /*
+ * When all zones in the I/O range are conventional, io_size
+ * can be smaller than zone size, making min_zone the same
+ * as max_zone. This is why the assert below needs to be made
+ * conditional.
+ */
+ if (zbd_is_seq_job(f))
+ assert(f->min_zone < f->max_zone);
+
+ if (td->o.max_open_zones > 0 &&
+ zbd->max_open_zones != td->o.max_open_zones) {
+ log_err("Different 'max_open_zones' values\n");
+ return 1;
+ }
+
+ /*
+ * The per job max open zones limit cannot be used without a
+ * global max open zones limit. (As the tracking of open zones
+ * is disabled when there is no global max open zones limit.)
+ */
+ if (td->o.job_max_open_zones && !zbd->max_open_zones) {
+ log_err("'job_max_open_zones' cannot be used without a global open zones limit\n");
+ return 1;
}
- if (reset_wp) {
- dprint(FD_ZBD, "%s: resetting zone %u\n",
- f->file_name, zbd_zone_nr(f, z));
- if (zbd_reset_zone(td, f, z) < 0)
- res = 1;
+
+ /*
+ * zbd->max_open_zones is the global limit shared for all jobs
+ * that target the same zoned block device. Force sync the per
+ * thread global limit with the actual global limit. (The real
+ * per thread/job limit is stored in td->o.job_max_open_zones).
+ */
+ td->o.max_open_zones = zbd->max_open_zones;
+
+ for (zi = f->min_zone; zi < f->max_zone; zi++) {
+ z = &zbd->zone_info[zi];
+ if (z->cond != ZBD_ZONE_COND_IMP_OPEN &&
+ z->cond != ZBD_ZONE_COND_EXP_OPEN)
+ continue;
+ if (zbd_open_zone(td, f, z))
+ continue;
+ /*
+ * If the number of open zones exceeds specified limits,
+ * reset all extra open zones.
+ */
+ if (zbd_reset_zone(td, f, z) < 0) {
+ log_err("Failed to reest zone %d\n", zi);
+ return 1;
+ }
}
- pthread_mutex_unlock(&z->mutex);
}
- return res;
+ return 0;
}
/*
return write_cnt == 0;
}
-enum swd_action {
- CHECK_SWD,
- SET_SWD,
-};
-
-/* Calculate the number of sectors with data (swd) and perform action 'a' */
-static uint64_t zbd_process_swd(const struct fio_file *f, enum swd_action a)
-{
- struct fio_zone_info *zb, *ze, *z;
- uint64_t swd = 0;
-
- zb = &f->zbd_info->zone_info[f->min_zone];
- ze = &f->zbd_info->zone_info[f->max_zone];
- for (z = zb; z < ze; z++) {
- pthread_mutex_lock(&z->mutex);
- swd += z->wp - z->start;
- }
- pthread_mutex_lock(&f->zbd_info->mutex);
- switch (a) {
- case CHECK_SWD:
- assert(f->zbd_info->sectors_with_data == swd);
- break;
- case SET_SWD:
- f->zbd_info->sectors_with_data = swd;
- break;
- }
- pthread_mutex_unlock(&f->zbd_info->mutex);
- for (z = zb; z < ze; z++)
- pthread_mutex_unlock(&z->mutex);
-
- return swd;
-}
-
-/*
- * The swd check is useful for debugging but takes too much time to leave
- * it enabled all the time. Hence it is disabled by default.
- */
-static const bool enable_check_swd = false;
-
-/* Check whether the value of zbd_info.sectors_with_data is correct. */
-static void zbd_check_swd(const struct fio_file *f)
-{
- if (!enable_check_swd)
- return;
-
- zbd_process_swd(f, CHECK_SWD);
-}
-
-static void zbd_init_swd(struct fio_file *f)
-{
- uint64_t swd;
-
- if (!enable_check_swd)
- return;
-
- swd = zbd_process_swd(f, SET_SWD);
- dprint(FD_ZBD, "%s(%s): swd = %" PRIu64 "\n", __func__, f->file_name,
- swd);
-}
-
void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
struct fio_zone_info *zb, *ze;
+ bool verify_data_left = false;
if (!f->zbd_info || !td_write(td))
return;
- zb = &f->zbd_info->zone_info[f->min_zone];
- ze = &f->zbd_info->zone_info[f->max_zone];
- zbd_init_swd(f);
+ zb = zbd_get_zone(f, f->min_zone);
+ ze = zbd_get_zone(f, f->max_zone);
+
/*
* If data verification is enabled, reset the affected zones before writing
* any data so that a zone reset does not have to be issued while writing
* data, which would cause data loss.
*/
- zbd_reset_zones(td, f, zb, ze, td->o.verify != VERIFY_NONE &&
- td->runstate != TD_VERIFYING);
+ if (td->o.verify != VERIFY_NONE) {
+ verify_data_left = td->runstate == TD_VERIFYING ||
+ td->io_hist_len || td->verify_batch;
+ if (td->io_hist_len && td->o.verify_backlog)
+ verify_data_left =
+ td->io_hist_len % td->o.verify_backlog;
+ if (!verify_data_left)
+ zbd_reset_zones(td, f, zb, ze);
+ }
+
zbd_reset_write_cnt(td, f);
}
-/* The caller must hold f->zbd_info->mutex. */
-static bool is_zone_open(const struct thread_data *td, const struct fio_file *f,
- unsigned int zone_idx)
+/* Return random zone index for one of the open zones. */
+static uint32_t pick_random_zone_idx(const struct fio_file *f,
+ const struct io_u *io_u)
{
- struct zoned_block_device_info *zbdi = f->zbd_info;
- int i;
-
- assert(td->o.job_max_open_zones == 0 || td->num_open_zones <= td->o.job_max_open_zones);
- assert(td->o.job_max_open_zones <= zbdi->max_open_zones);
- assert(zbdi->num_open_zones <= zbdi->max_open_zones);
-
- for (i = 0; i < zbdi->num_open_zones; i++)
- if (zbdi->open_zones[i] == zone_idx)
- return true;
-
- return false;
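+	/*
+	 * Map the offset's relative position within the I/O range to an
+	 * index into the open zones array.
+	 */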
+ return (io_u->offset - f->file_offset) *
+ f->zbd_info->num_open_zones / f->io_size;
}
-/*
- * Open a ZBD zone if it was not yet open. Returns true if either the zone was
- * already open or if opening a new zone is allowed. Returns false if the zone
- * was not yet open and opening a new zone would cause the zone limit to be
- * exceeded.
- */
-static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
- uint32_t zone_idx)
+static bool any_io_in_flight(void)
{
- const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
- struct fio_zone_info *z = &f->zbd_info->zone_info[zone_idx];
- bool res = true;
-
- if (z->cond == ZBD_ZONE_COND_OFFLINE)
- return false;
-
- /*
- * Skip full zones with data verification enabled because resetting a
- * zone causes data loss and hence causes verification to fail.
- */
- if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
- return false;
-
- pthread_mutex_lock(&f->zbd_info->mutex);
- if (is_zone_open(td, f, zone_idx)) {
- /*
- * If the zone is already open and going to be full by writes
- * in-flight, handle it as a full zone instead of an open zone.
- */
- if (z->wp >= zbd_zone_capacity_end(z))
- res = false;
- goto out;
- }
- res = false;
- /* Zero means no limit */
- if (td->o.job_max_open_zones > 0 &&
- td->num_open_zones >= td->o.job_max_open_zones)
- goto out;
- if (f->zbd_info->num_open_zones >= f->zbd_info->max_open_zones)
- goto out;
- dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
- f->zbd_info->open_zones[f->zbd_info->num_open_zones++] = zone_idx;
- td->num_open_zones++;
- z->open = 1;
- res = true;
-
-out:
- pthread_mutex_unlock(&f->zbd_info->mutex);
- return res;
-}
+ for_each_td(td) {
+ if (td->io_u_in_flight)
+ return true;
+ } end_for_each();
-/* Anything goes as long as it is not a constant. */
-static uint32_t pick_random_zone_idx(const struct fio_file *f,
- const struct io_u *io_u)
-{
- return io_u->offset * f->zbd_info->num_open_zones / f->real_file_size;
+ return false;
}
/*
* Modify the offset of an I/O unit that does not refer to an open zone such
* that it refers to an open zone. Close an open zone and open a new zone if
- * necessary. This algorithm can only work correctly if all write pointers are
+ * necessary. Only zones with a write pointer are considered in the search.
+ * This algorithm can only work correctly if all write pointers are
* a multiple of the fio block size. The caller must neither hold z->mutex
* nor f->zbd_info->mutex. Returns with z->mutex held upon success.
*/
static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
struct io_u *io_u)
{
- const uint32_t min_bs = td->o.min_bs[io_u->ddir];
+ const uint64_t min_bs = td->o.min_bs[io_u->ddir];
struct fio_file *f = io_u->file;
+ struct zoned_block_device_info *zbdi = f->zbd_info;
struct fio_zone_info *z;
unsigned int open_zone_idx = -1;
uint32_t zone_idx, new_zone_idx;
int i;
bool wait_zone_close;
+ bool in_flight;
+ bool should_retry = true;
assert(is_valid_offset(f, io_u->offset));
- if (td->o.max_open_zones || td->o.job_max_open_zones) {
+ if (zbdi->max_open_zones || td->o.job_max_open_zones) {
/*
- * This statement accesses f->zbd_info->open_zones[] on purpose
+ * This statement accesses zbdi->open_zones[] on purpose
* without locking.
*/
- zone_idx = f->zbd_info->open_zones[pick_random_zone_idx(f, io_u)];
+ zone_idx = zbdi->open_zones[pick_random_zone_idx(f, io_u)];
} else {
- zone_idx = zbd_zone_idx(f, io_u->offset);
+ zone_idx = zbd_offset_to_zone_idx(f, io_u->offset);
}
if (zone_idx < f->min_zone)
zone_idx = f->min_zone;
else if (zone_idx >= f->max_zone)
zone_idx = f->max_zone - 1;
- dprint(FD_ZBD, "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
+
+ dprint(FD_ZBD,
+ "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
__func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);
/*
- * Since z->mutex is the outer lock and f->zbd_info->mutex the inner
+ * Since z->mutex is the outer lock and zbdi->mutex the inner
* lock it can happen that the state of the zone with index zone_idx
- * has changed after 'z' has been assigned and before f->zbd_info->mutex
+ * has changed after 'z' has been assigned and before zbdi->mutex
* has been obtained. Hence the loop.
*/
for (;;) {
uint32_t tmp_idx;
- z = &f->zbd_info->zone_info[zone_idx];
-
- zone_lock(td, f, z);
- pthread_mutex_lock(&f->zbd_info->mutex);
- if (td->o.max_open_zones == 0 && td->o.job_max_open_zones == 0)
- goto examine_zone;
- if (f->zbd_info->num_open_zones == 0) {
- dprint(FD_ZBD, "%s(%s): no zones are open\n",
- __func__, f->file_name);
- goto open_other_zone;
+ z = zbd_get_zone(f, zone_idx);
+ if (z->has_wp)
+ zone_lock(td, f, z);
+
+ pthread_mutex_lock(&zbdi->mutex);
+
+ if (z->has_wp) {
+ if (z->cond != ZBD_ZONE_COND_OFFLINE &&
+ zbdi->max_open_zones == 0 &&
+ td->o.job_max_open_zones == 0)
+ goto examine_zone;
+ if (zbdi->num_open_zones == 0) {
+ dprint(FD_ZBD, "%s(%s): no zones are open\n",
+ __func__, f->file_name);
+ goto open_other_zone;
+ }
}
/*
- * List of opened zones is per-device, shared across all threads.
- * Start with quasi-random candidate zone.
- * Ignore zones which don't belong to thread's offset/size area.
+ * List of opened zones is per-device, shared across all
+ * threads. Start with quasi-random candidate zone. Ignore
+ * zones which don't belong to thread's offset/size area.
*/
open_zone_idx = pick_random_zone_idx(f, io_u);
- assert(open_zone_idx < f->zbd_info->num_open_zones);
+ assert(!open_zone_idx ||
+ open_zone_idx < zbdi->num_open_zones);
tmp_idx = open_zone_idx;
- for (i = 0; i < f->zbd_info->num_open_zones; i++) {
+
+ for (i = 0; i < zbdi->num_open_zones; i++) {
uint32_t tmpz;
- if (tmp_idx >= f->zbd_info->num_open_zones)
+ if (tmp_idx >= zbdi->num_open_zones)
tmp_idx = 0;
- tmpz = f->zbd_info->open_zones[tmp_idx];
+ tmpz = zbdi->open_zones[tmp_idx];
if (f->min_zone <= tmpz && tmpz < f->max_zone) {
open_zone_idx = tmp_idx;
goto found_candidate_zone;
dprint(FD_ZBD, "%s(%s): no candidate zone\n",
__func__, f->file_name);
- pthread_mutex_unlock(&f->zbd_info->mutex);
- pthread_mutex_unlock(&z->mutex);
+
+ pthread_mutex_unlock(&zbdi->mutex);
+
+ if (z->has_wp)
+ zone_unlock(z);
+
return NULL;
found_candidate_zone:
- new_zone_idx = f->zbd_info->open_zones[open_zone_idx];
+ new_zone_idx = zbdi->open_zones[open_zone_idx];
if (new_zone_idx == zone_idx)
break;
zone_idx = new_zone_idx;
- pthread_mutex_unlock(&f->zbd_info->mutex);
- pthread_mutex_unlock(&z->mutex);
+
+ pthread_mutex_unlock(&zbdi->mutex);
+
+ if (z->has_wp)
+ zone_unlock(z);
}
- /* Both z->mutex and f->zbd_info->mutex are held. */
+ /* Both z->mutex and zbdi->mutex are held. */
examine_zone:
- if (z->wp + min_bs <= zbd_zone_capacity_end(z)) {
- pthread_mutex_unlock(&f->zbd_info->mutex);
+ if (zbd_zone_remainder(z) >= min_bs) {
+ pthread_mutex_unlock(&zbdi->mutex);
goto out;
}
open_other_zone:
/* Check if number of open zones reaches one of limits. */
wait_zone_close =
- f->zbd_info->num_open_zones == f->max_zone - f->min_zone ||
- (td->o.max_open_zones &&
- f->zbd_info->num_open_zones == td->o.max_open_zones) ||
+ zbdi->num_open_zones == f->max_zone - f->min_zone ||
+ (zbdi->max_open_zones &&
+ zbdi->num_open_zones == zbdi->max_open_zones) ||
(td->o.job_max_open_zones &&
td->num_open_zones == td->o.job_max_open_zones);
- pthread_mutex_unlock(&f->zbd_info->mutex);
+ pthread_mutex_unlock(&zbdi->mutex);
/* Only z->mutex is held. */
* zone close before opening a new zone.
*/
if (wait_zone_close) {
- dprint(FD_ZBD, "%s(%s): quiesce to allow open zones to close\n",
+ dprint(FD_ZBD,
+ "%s(%s): quiesce to allow open zones to close\n",
__func__, f->file_name);
io_u_quiesce(td);
}
+retry:
/* Zone 'z' is full, so try to open a new zone. */
- for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
+ for (i = f->io_size / zbdi->zone_size; i > 0; i--) {
zone_idx++;
- pthread_mutex_unlock(&z->mutex);
+ if (z->has_wp)
+ zone_unlock(z);
z++;
if (!is_valid_offset(f, z->start)) {
/* Wrap-around. */
zone_idx = f->min_zone;
- z = &f->zbd_info->zone_info[zone_idx];
+ z = zbd_get_zone(f, zone_idx);
}
assert(is_valid_offset(f, z->start));
+ if (!z->has_wp)
+ continue;
zone_lock(td, f, z);
if (z->open)
continue;
- if (zbd_open_zone(td, f, zone_idx))
+ if (zbd_open_zone(td, f, z))
goto out;
}
/* Only z->mutex is held. */
/* Check whether the write fits in any of the already opened zones. */
- pthread_mutex_lock(&f->zbd_info->mutex);
- for (i = 0; i < f->zbd_info->num_open_zones; i++) {
- zone_idx = f->zbd_info->open_zones[i];
+ pthread_mutex_lock(&zbdi->mutex);
+ for (i = 0; i < zbdi->num_open_zones; i++) {
+ zone_idx = zbdi->open_zones[i];
if (zone_idx < f->min_zone || zone_idx >= f->max_zone)
continue;
- pthread_mutex_unlock(&f->zbd_info->mutex);
- pthread_mutex_unlock(&z->mutex);
+ pthread_mutex_unlock(&zbdi->mutex);
+ zone_unlock(z);
- z = &f->zbd_info->zone_info[zone_idx];
+ z = zbd_get_zone(f, zone_idx);
zone_lock(td, f, z);
- if (z->wp + min_bs <= zbd_zone_capacity_end(z))
+ if (zbd_zone_remainder(z) >= min_bs)
goto out;
- pthread_mutex_lock(&f->zbd_info->mutex);
+ pthread_mutex_lock(&zbdi->mutex);
}
- pthread_mutex_unlock(&f->zbd_info->mutex);
- pthread_mutex_unlock(&z->mutex);
- dprint(FD_ZBD, "%s(%s): did not open another zone\n", __func__,
- f->file_name);
+
+ /*
+ * If any I/O is still in flight, or if in-flight I/Os have just completed,
+ * those I/Os may have closed zones, so retry the steps to open a zone.
+ * Before retrying, call io_u_quiesce() to complete the in-flight writes.
+ */
+ in_flight = any_io_in_flight();
+ if (in_flight || should_retry) {
+ dprint(FD_ZBD,
+ "%s(%s): wait zone close and retry open zones\n",
+ __func__, f->file_name);
+ pthread_mutex_unlock(&zbdi->mutex);
+ zone_unlock(z);
+ io_u_quiesce(td);
+ zone_lock(td, f, z);
+ should_retry = in_flight;
+ goto retry;
+ }
+
+ pthread_mutex_unlock(&zbdi->mutex);
+
+ zone_unlock(z);
+
+ dprint(FD_ZBD, "%s(%s): did not open another zone\n",
+ __func__, f->file_name);
+
return NULL;
out:
- dprint(FD_ZBD, "%s(%s): returning zone %d\n", __func__, f->file_name,
- zone_idx);
- io_u->offset = z->start;
- return z;
-}
+ dprint(FD_ZBD, "%s(%s): returning zone %d\n",
+ __func__, f->file_name, zone_idx);
-/* The caller must hold z->mutex. */
-static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
- struct io_u *io_u,
- struct fio_zone_info *z)
-{
- const struct fio_file *f = io_u->file;
- const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
-
- if (!zbd_open_zone(td, f, zbd_zone_nr(f, z))) {
- pthread_mutex_unlock(&z->mutex);
- z = zbd_convert_to_open_zone(td, io_u);
- assert(z);
- }
+ io_u->offset = z->start;
+ assert(z->has_wp);
+ assert(z->cond != ZBD_ZONE_COND_OFFLINE);
- if (z->verify_block * min_bs >= z->capacity)
- log_err("%s: %d * %d >= %llu\n", f->file_name, z->verify_block,
- min_bs, (unsigned long long)z->capacity);
- io_u->offset = z->start + z->verify_block++ * min_bs;
return z;
}
/*
- * Find another zone for which @io_u fits below the write pointer. Start
- * searching in zones @zb + 1 .. @zl and continue searching in zones
- * @zf .. @zb - 1.
+ * Find another zone which has @min_bytes of readable data. Search in zones
+ * @zb + 1 .. @zl. For random workload, also search in zones @zb - 1 .. @zf.
*
- * Either returns NULL or returns a zone pointer and holds the mutex for that
- * zone.
+ * Either returns NULL or returns a zone pointer. When the zone has write
+ * pointer, hold the mutex for the zone.
*/
static struct fio_zone_info *
-zbd_find_zone(struct thread_data *td, struct io_u *io_u,
+zbd_find_zone(struct thread_data *td, struct io_u *io_u, uint64_t min_bytes,
struct fio_zone_info *zb, struct fio_zone_info *zl)
{
- const uint32_t min_bs = td->o.min_bs[io_u->ddir];
struct fio_file *f = io_u->file;
struct fio_zone_info *z1, *z2;
- const struct fio_zone_info *const zf =
- &f->zbd_info->zone_info[f->min_zone];
+ const struct fio_zone_info *const zf = zbd_get_zone(f, f->min_zone);
/*
* Skip to the next non-empty zone in case of sequential I/O and to
*/
for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
if (z1 < zl && z1->cond != ZBD_ZONE_COND_OFFLINE) {
- zone_lock(td, f, z1);
- if (z1->start + min_bs <= z1->wp)
+ if (z1->has_wp)
+ zone_lock(td, f, z1);
+ if (z1->start + min_bytes <= z1->wp)
return z1;
- pthread_mutex_unlock(&z1->mutex);
+ if (z1->has_wp)
+ zone_unlock(z1);
} else if (!td_random(td)) {
break;
}
+
if (td_random(td) && z2 >= zf &&
z2->cond != ZBD_ZONE_COND_OFFLINE) {
- zone_lock(td, f, z2);
- if (z2->start + min_bs <= z2->wp)
+ if (z2->has_wp)
+ zone_lock(td, f, z2);
+ if (z2->start + min_bytes <= z2->wp)
return z2;
- pthread_mutex_unlock(&z2->mutex);
+ if (z2->has_wp)
+ zone_unlock(z2);
}
}
- dprint(FD_ZBD, "%s: adjusting random read offset failed\n",
- f->file_name);
+
+ dprint(FD_ZBD,
+ "%s: no zone has %"PRIu64" bytes of readable data\n",
+ f->file_name, min_bytes);
+
return NULL;
}
if (io_u->ddir == DDIR_WRITE &&
io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
pthread_mutex_lock(&f->zbd_info->mutex);
- zbd_close_zone(td, f, zbd_zone_nr(f, z));
+ zbd_close_zone(td, f, z);
pthread_mutex_unlock(&f->zbd_info->mutex);
}
}
const struct fio_file *f = io_u->file;
struct zoned_block_device_info *zbd_info = f->zbd_info;
struct fio_zone_info *z;
- uint32_t zone_idx;
uint64_t zone_end;
- if (!zbd_info)
- return;
-
- zone_idx = zbd_zone_idx(f, io_u->offset);
- assert(zone_idx < zbd_info->nr_zones);
- z = &zbd_info->zone_info[zone_idx];
+ assert(zbd_info);
- if (!zbd_zone_swr(z))
- return;
+ z = zbd_offset_to_zone(f, io_u->offset);
+ assert(z->has_wp);
if (!success)
goto unlock;
dprint(FD_ZBD,
"%s: queued I/O (%lld, %llu) for zone %u\n",
- f->file_name, io_u->offset, io_u->buflen, zone_idx);
+ f->file_name, io_u->offset, io_u->buflen, zbd_zone_idx(f, z));
switch (io_u->ddir) {
case DDIR_WRITE:
zone_end = min((uint64_t)(io_u->offset + io_u->buflen),
zbd_zone_capacity_end(z));
- pthread_mutex_lock(&zbd_info->mutex);
+
/*
* z->wp > zone_end means that one or more I/O errors
* have occurred.
*/
- if (z->wp <= zone_end)
- zbd_info->sectors_with_data += zone_end - z->wp;
- pthread_mutex_unlock(&zbd_info->mutex);
+ if (accounting_vdb(td, f) && z->wp <= zone_end) {
+ pthread_mutex_lock(&zbd_info->mutex);
+ zbd_info->wp_valid_data_bytes += zone_end - z->wp;
+ pthread_mutex_unlock(&zbd_info->mutex);
+ }
z->wp = zone_end;
break;
- case DDIR_TRIM:
- assert(z->wp == z->start);
- break;
default:
break;
}
unlock:
if (!success || q != FIO_Q_QUEUED) {
/* BUSY or COMPLETED: unlock the zone */
- pthread_mutex_unlock(&z->mutex);
+ zone_unlock(z);
io_u->zbd_put_io = NULL;
}
}
const struct fio_file *f = io_u->file;
struct zoned_block_device_info *zbd_info = f->zbd_info;
struct fio_zone_info *z;
- uint32_t zone_idx;
- int ret;
- if (!zbd_info)
- return;
+ assert(zbd_info);
- zone_idx = zbd_zone_idx(f, io_u->offset);
- assert(zone_idx < zbd_info->nr_zones);
- z = &zbd_info->zone_info[zone_idx];
-
- if (!zbd_zone_swr(z))
- return;
+ z = zbd_offset_to_zone(f, io_u->offset);
+ assert(z->has_wp);
dprint(FD_ZBD,
"%s: terminate I/O (%lld, %llu) for zone %u\n",
- f->file_name, io_u->offset, io_u->buflen, zone_idx);
+ f->file_name, io_u->offset, io_u->buflen, zbd_zone_idx(f, z));
zbd_end_zone_io(td, io_u, z);
- ret = pthread_mutex_unlock(&z->mutex);
- assert(ret == 0);
- zbd_check_swd(f);
+ zone_unlock(z);
}
/*
struct fio_file *f = io_u->file;
enum fio_ddir ddir = io_u->ddir;
struct fio_zone_info *z;
- uint32_t zone_idx;
assert(td->o.zone_mode == ZONE_MODE_ZBD);
assert(td->o.zone_size);
+ assert(f->zbd_info);
- zone_idx = zbd_zone_idx(f, f->last_pos[ddir]);
- z = &f->zbd_info->zone_info[zone_idx];
+ z = zbd_offset_to_zone(f, f->last_pos[ddir]);
/*
* When the zone capacity is smaller than the zone size and the I/O is
* sequential write, skip to zone end if the latest position is at the
* zone capacity limit.
*/
- if (z->capacity < f->zbd_info->zone_size && !td_random(td) &&
- ddir == DDIR_WRITE &&
+ if (z->capacity < f->zbd_info->zone_size &&
+ !td_random(td) && ddir == DDIR_WRITE &&
f->last_pos[ddir] >= zbd_zone_capacity_end(z)) {
dprint(FD_ZBD,
"%s: Jump from zone capacity limit to zone end:"
- " (%llu -> %llu) for zone %u (%llu)\n",
- f->file_name, (unsigned long long) f->last_pos[ddir],
- (unsigned long long) zbd_zone_end(z), zone_idx,
- (unsigned long long) z->capacity);
+ " (%"PRIu64" -> %"PRIu64") for zone %u (%"PRIu64")\n",
+ f->file_name, f->last_pos[ddir],
+ zbd_zone_end(z), zbd_zone_idx(f, z), z->capacity);
td->io_skip_bytes += zbd_zone_end(z) - f->last_pos[ddir];
f->last_pos[ddir] = zbd_zone_end(z);
}
* devices with all empty zones. Overwrite the first I/O direction as
* write to make sure data to read exists.
*/
+ assert(io_u->file->zbd_info);
if (ddir != DDIR_READ || !td_rw(td))
return ddir;
- if (io_u->file->zbd_info->sectors_with_data ||
- td->o.read_beyond_wp)
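+ /* Reads are possible once the job has started at least one write. */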
+ if (io_u->file->last_start[DDIR_WRITE] != -1ULL || td->o.read_beyond_wp)
return DDIR_READ;
return DDIR_WRITE;
enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
{
struct fio_file *f = io_u->file;
- uint32_t zone_idx_b;
+ struct zoned_block_device_info *zbdi = f->zbd_info;
struct fio_zone_info *zb, *zl, *orig_zb;
uint32_t orig_len = io_u->buflen;
- uint32_t min_bs = td->o.min_bs[io_u->ddir];
+ uint64_t min_bs = td->o.min_bs[io_u->ddir];
uint64_t new_len;
int64_t range;
- if (!f->zbd_info)
- return io_u_accept;
-
+ assert(zbdi);
assert(min_bs);
assert(is_valid_offset(f, io_u->offset));
assert(io_u->buflen);
- zone_idx_b = zbd_zone_idx(f, io_u->offset);
- zb = &f->zbd_info->zone_info[zone_idx_b];
+
+ zb = zbd_offset_to_zone(f, io_u->offset);
orig_zb = zb;
- /* Accept the I/O offset for conventional zones. */
- if (!zbd_zone_swr(zb))
+ if (!zb->has_wp) {
+ /* Accept non-write I/Os for conventional zones. */
+ if (io_u->ddir != DDIR_WRITE)
+ return io_u_accept;
+
+ /*
+ * Make sure that writes to conventional zones
+ * don't cross over to any sequential zones.
+ */
+ if (!(zb + 1)->has_wp ||
+ io_u->offset + io_u->buflen <= (zb + 1)->start)
+ return io_u_accept;
+
+ if (io_u->offset + min_bs > (zb + 1)->start) {
+ dprint(FD_IO,
+ "%s: off=%llu + min_bs=%"PRIu64" > next zone %"PRIu64"\n",
+ f->file_name, io_u->offset,
+ min_bs, (zb + 1)->start);
+ io_u->offset =
+ zb->start + (zb + 1)->start - io_u->offset;
+ new_len = min(io_u->buflen,
+ (zb + 1)->start - io_u->offset);
+ } else {
+ new_len = (zb + 1)->start - io_u->offset;
+ }
+
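+ /* Round the adjusted length down to a multiple of min_bs. */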
+ io_u->buflen = new_len / min_bs * min_bs;
+
return io_u_accept;
+ }
/*
* Accept the I/O offset for reads if reading beyond the write pointer
io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
return io_u_accept;
- zbd_check_swd(f);
-
zone_lock(td, f, zb);
switch (io_u->ddir) {
case DDIR_READ:
- if (td->runstate == TD_VERIFYING && td_write(td)) {
- zb = zbd_replay_write_order(td, io_u, zb);
- pthread_mutex_unlock(&zb->mutex);
+ if (td->runstate == TD_VERIFYING && td_write(td))
goto accept;
- }
+
/*
* Check that there is enough written data in the zone to do an
* I/O of at least min_bs B. If there isn't, find a new zone for
zb->wp - zb->start : 0;
if (range < min_bs ||
((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
- pthread_mutex_unlock(&zb->mutex);
- zl = &f->zbd_info->zone_info[f->max_zone];
- zb = zbd_find_zone(td, io_u, zb, zl);
+ zone_unlock(zb);
+ zl = zbd_get_zone(f, f->max_zone);
+ zb = zbd_find_zone(td, io_u, min_bs, zb, zl);
if (!zb) {
dprint(FD_ZBD,
"%s: zbd_find_zone(%lld, %llu) failed\n",
if (!td_random(td))
io_u->offset = zb->start;
}
+
/*
* Make sure the I/O is within the zone valid data range while
* maximizing the I/O size and preserving randomness.
io_u->offset = zb->start +
((io_u->offset - orig_zb->start) %
(range - io_u->buflen)) / min_bs * min_bs;
+
+ /*
+ * When zbd_find_zone() returns a conventional zone,
+ * we can simply accept the new I/O offset here.
+ */
+ if (!zb->has_wp)
+ return io_u_accept;
+
/*
* Make sure the I/O does not cross over the zone wp position.
*/
dprint(FD_IO, "Changed length from %u into %llu\n",
orig_len, io_u->buflen);
}
+
assert(zb->start <= io_u->offset);
assert(io_u->offset + io_u->buflen <= zb->wp);
+
goto accept;
+
case DDIR_WRITE:
- if (io_u->buflen > f->zbd_info->zone_size)
+ if (io_u->buflen > zbdi->zone_size) {
+ td_verror(td, EINVAL, "I/O buflen exceeds zone size");
+ dprint(FD_IO,
+ "%s: I/O buflen %llu exceeds zone size %"PRIu64"\n",
+ f->file_name, io_u->buflen, zbdi->zone_size);
goto eof;
- if (!zbd_open_zone(td, f, zone_idx_b)) {
- pthread_mutex_unlock(&zb->mutex);
+ }
+
+retry:
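+ /*
+ * If the zone has some capacity left, but less than the minimum
+ * block size, finish the zone and move on to the next write
+ * pointer zone within the working area.
+ */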
+ if (zbd_zone_remainder(zb) > 0 &&
+ zbd_zone_remainder(zb) < min_bs) {
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ zbd_close_zone(td, f, zb);
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+ dprint(FD_ZBD,
+ "%s: finish zone %d\n",
+ f->file_name, zbd_zone_idx(f, zb));
+ io_u_quiesce(td);
+ zbd_finish_zone(td, f, zb);
+ if (zbd_zone_idx(f, zb) + 1 >= f->max_zone) {
+ if (!td_random(td))
+ goto eof;
+ }
+ zone_unlock(zb);
+
+ /* Find the next write pointer zone */
+ do {
+ zb++;
+ if (zbd_zone_idx(f, zb) >= f->max_zone)
+ zb = zbd_get_zone(f, f->min_zone);
+ } while (!zb->has_wp);
+
+ zone_lock(td, f, zb);
+ }
+
+ if (!zbd_open_zone(td, f, zb)) {
+ zone_unlock(zb);
zb = zbd_convert_to_open_zone(td, io_u);
- if (!zb)
+ if (!zb) {
+ dprint(FD_IO, "%s: can't convert to open zone",
+ f->file_name);
goto eof;
- zone_idx_b = zbd_zone_nr(f, zb);
+ }
}
+
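+ /*
+ * The zone returned by zbd_convert_to_open_zone() may itself have
+ * less than min_bs remaining, so re-check and retry if needed.
+ */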
+ if (zbd_zone_remainder(zb) > 0 &&
+ zbd_zone_remainder(zb) < min_bs)
+ goto retry;
+
/* Check whether the zone reset threshold has been exceeded */
if (td->o.zrf.u.f) {
- if (f->zbd_info->sectors_with_data >=
+ if (zbdi->wp_valid_data_bytes >=
f->io_size * td->o.zrt.u.f &&
- zbd_dec_and_reset_write_cnt(td, f)) {
+ zbd_dec_and_reset_write_cnt(td, f))
zb->reset_zone = 1;
- }
}
+
/* Reset the zone pointer if necessary */
if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
- assert(td->o.verify == VERIFY_NONE);
+ if (td->o.verify != VERIFY_NONE) {
+ /*
+ * Unset io_u->file to tell get_next_verify()
+ * that this I/O is not a requeue.
+ */
+ io_u->file = NULL;
+ if (!get_next_verify(td, io_u)) {
+ zone_unlock(zb);
+ return io_u_accept;
+ }
+ io_u->file = f;
+ }
+
/*
* Since previous write requests may have been submitted
* asynchronously and since we will submit the zone
goto eof;
if (zb->capacity < min_bs) {
- log_err("zone capacity %llu smaller than minimum block size %d\n",
- (unsigned long long)zb->capacity,
- min_bs);
+ td_verror(td, EINVAL, "ZCAP is less min_bs");
+ log_err("zone capacity %"PRIu64" smaller than minimum block size %"PRIu64"\n",
+ zb->capacity, min_bs);
goto eof;
}
}
+
/* Make writes occur at the write pointer */
assert(!zbd_zone_full(f, zb, min_bs));
io_u->offset = zb->wp;
if (!is_valid_offset(f, io_u->offset)) {
- dprint(FD_ZBD, "Dropped request with offset %llu\n",
- io_u->offset);
+ td_verror(td, EINVAL, "invalid WP value");
+ dprint(FD_ZBD, "%s: dropped request with offset %llu\n",
+ f->file_name, io_u->offset);
goto eof;
}
+
/*
* Make sure that the buflen is a multiple of the minimal
* block size. Give up if shrinking would make the request too
orig_len, io_u->buflen);
goto accept;
}
- log_err("Zone remainder %lld smaller than minimum block size %d\n",
- (zbd_zone_capacity_end(zb) - io_u->offset),
- min_bs);
+
+ td_verror(td, EIO, "zone remainder too small");
+ log_err("zone remainder %lld smaller than min block size %"PRIu64"\n",
+ (zbd_zone_capacity_end(zb) - io_u->offset), min_bs);
+
goto eof;
+
case DDIR_TRIM:
- /* fall-through */
+ /* Check that a random trim targets a non-empty zone */
+ if (!td_random(td) || zb->wp > zb->start)
+ goto accept;
+
+ /* Find a non-empty zone to trim */
+ zone_unlock(zb);
+ zl = zbd_get_zone(f, f->max_zone);
+ zb = zbd_find_zone(td, io_u, 1, zb, zl);
+ if (zb) {
+ io_u->offset = zb->start;
+ dprint(FD_ZBD, "%s: found new zone(%lld) for trim\n",
+ f->file_name, io_u->offset);
+ goto accept;
+ }
+
+ goto eof;
+
case DDIR_SYNC:
+ /* fall-through */
case DDIR_DATASYNC:
case DDIR_SYNC_FILE_RANGE:
case DDIR_WAIT:
assert(false);
accept:
- assert(zb);
+ assert(zb->has_wp);
assert(zb->cond != ZBD_ZONE_COND_OFFLINE);
assert(!io_u->zbd_queue_io);
assert(!io_u->zbd_put_io);
+
io_u->zbd_queue_io = zbd_queue_io;
io_u->zbd_put_io = zbd_put_io;
+
+ /*
+ * Since we return with the zone lock still held,
+ * add an annotation to let Coverity know that it
+ * is intentional.
+ */
+ /* coverity[missing_unlock] */
+
return io_u_accept;
eof:
- if (zb)
- pthread_mutex_unlock(&zb->mutex);
+ if (zb && zb->has_wp)
+ zone_unlock(zb);
+
return io_u_eof;
}
{
char *res;
- if (asprintf(&res, "; %llu zone resets", (unsigned long long) ts->nr_zone_resets) < 0)
+ if (asprintf(&res, "; %"PRIu64" zone resets", ts->nr_zone_resets) < 0)
return NULL;
return res;
}
+
+/**
+ * zbd_do_io_u_trim - If a zone reset is applicable, reset the zone instead of trimming
+ *
+ * @td: FIO thread data.
+ * @io_u: FIO I/O unit.
+ *
+ * It is assumed that z->mutex is already locked.
+ * Return io_u_completed when the zone reset succeeds. Return 0 when the target
+ * zone does not have a write pointer. On error, return negative errno.
+ */
+int zbd_do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+ struct fio_zone_info *z;
+ int ret;
+
+ z = zbd_offset_to_zone(f, io_u->offset);
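+ /* Nothing to reset for a zone without a write pointer. */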
+ if (!z->has_wp)
+ return 0;
+
+ if (io_u->offset != z->start) {
+ log_err("Trim offset not at zone start (%lld)\n",
+ io_u->offset);
+ return -EINVAL;
+ }
+
+ ret = zbd_reset_zone((struct thread_data *)td, f, z);
+ if (ret < 0)
+ return ret;
+
+ return io_u_completed;
+}