#include <sys/stat.h>
#include <unistd.h>
+#include "compiler/compiler.h"
#include "os/os.h"
#include "file.h"
#include "fio.h"
return z->start + z->capacity;
}
+/**
+ * zbd_zone_remainder - Return the number of bytes that are still available for
+ * writing before the zone gets full
+ * @z: zone info pointer.
+ */
+static inline uint64_t zbd_zone_remainder(struct fio_zone_info *z)
+{
+ if (z->wp >= zbd_zone_capacity_end(z))
+ return 0;
+
+ return zbd_zone_capacity_end(z) - z->wp;
+}
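+
+/*
+ * For example, if z->capacity is 256 MiB and z->wp has advanced 64 MiB past
+ * z->start, zbd_zone_remainder() returns 192 MiB and zbd_zone_full() below
+ * reports the zone as full only when the required byte count exceeds that
+ * remainder.
+ */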
+
/**
* zbd_zone_full - verify whether a minimum number of bytes remain in a zone
* @f: file pointer.
{
assert((required & 511) == 0);
- return z->has_wp &&
- z->wp + required > zbd_zone_capacity_end(z);
+ return z->has_wp && required > zbd_zone_remainder(z);
}
static void zone_lock(struct thread_data *td, const struct fio_file *f,
struct fio_zone_info *z)
{
- struct zoned_block_device_info *zbd = f->zbd_info;
- uint32_t nz = z - zbd->zone_info;
-
+#ifndef NDEBUG
+ unsigned int const nz = zbd_zone_idx(f, z);
/* A thread should never lock zones outside its working area. */
assert(f->min_zone <= nz && nz < f->max_zone);
-
assert(z->has_wp);
+#endif
/*
* Lock the io_u target zone. The zone will be unlocked if io_u offset
static inline void zone_unlock(struct fio_zone_info *z)
{
- int ret;
-
assert(z->has_wp);
- ret = pthread_mutex_unlock(&z->mutex);
- assert(!ret);
+ pthread_mutex_unlock(&z->mutex);
}
static inline struct fio_zone_info *zbd_get_zone(const struct fio_file *f,
return &f->zbd_info->zone_info[zone_idx];
}
+static inline struct fio_zone_info *
+zbd_offset_to_zone(const struct fio_file *f, uint64_t offset)
+{
+ return zbd_get_zone(f, zbd_offset_to_zone_idx(f, offset));
+}
+
+static bool accounting_vdb(struct thread_data *td, const struct fio_file *f)
+{
+ return td->o.zrt.u.f && td_write(td);
+}
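+
+/*
+ * Valid data byte accounting is needed only when the zone_reset_threshold
+ * option is set and the job issues writes, so every wp_valid_data_bytes
+ * update is guarded by accounting_vdb().
+ */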
+
/**
* zbd_get_zoned_model - Get a device zoned model
* @td: FIO thread data
ret = blkzoned_report_zones(td, f, offset, zones, nr_zones);
if (ret < 0) {
td_verror(td, errno, "report zones failed");
- log_err("%s: report zones from sector %"PRIu64" failed (%d).\n",
- f->file_name, offset >> 9, errno);
+ log_err("%s: report zones from sector %"PRIu64" failed (nr_zones=%d; errno=%d).\n",
+ f->file_name, offset >> 9, nr_zones, errno);
} else if (ret == 0) {
td_verror(td, errno, "Empty zone report");
log_err("%s: report zones from sector %"PRIu64" is empty.\n",
}
/**
- * zbd_reset_zone - reset the write pointer of a single zone
+ * __zbd_reset_zone - reset the write pointer of a single zone
* @td: FIO thread data.
* @f: FIO file associated with the disk for which to reset a write pointer.
* @z: Zone to reset.
*
* The caller must hold z->mutex.
*/
-static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
- struct fio_zone_info *z)
+static int __zbd_reset_zone(struct thread_data *td, struct fio_file *f,
+ struct fio_zone_info *z)
{
uint64_t offset = z->start;
uint64_t length = (z+1)->start - offset;
break;
}
- pthread_mutex_lock(&f->zbd_info->mutex);
- f->zbd_info->sectors_with_data -= data_in_zone;
- f->zbd_info->wp_sectors_with_data -= data_in_zone;
- pthread_mutex_unlock(&f->zbd_info->mutex);
+ if (accounting_vdb(td, f)) {
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ f->zbd_info->wp_valid_data_bytes -= data_in_zone;
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+ }
z->wp = z->start;
- z->verify_block = 0;
td->ts.nr_zone_resets++;
}
/**
- * zbd_close_zone - Remove a zone from the open zones array.
+ * zbd_write_zone_put - Remove a zone from the write target zones array.
* @td: FIO thread data.
- * @f: FIO file associated with the disk for which to reset a write pointer.
+ * @f: FIO file that holds the array of write zones.
- * @zone_idx: Index of the zone to remove.
+ * @z: Zone to remove.
*
* The caller must hold f->zbd_info->mutex.
*/
-static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
- struct fio_zone_info *z)
+static void zbd_write_zone_put(struct thread_data *td, const struct fio_file *f,
+ struct fio_zone_info *z)
{
- uint32_t ozi;
+ uint32_t zi;
- if (!z->open)
+ if (!z->write)
return;
- for (ozi = 0; ozi < f->zbd_info->num_open_zones; ozi++) {
- if (zbd_get_zone(f, f->zbd_info->open_zones[ozi]) == z)
+ for (zi = 0; zi < f->zbd_info->num_write_zones; zi++) {
+ if (zbd_get_zone(f, f->zbd_info->write_zones[zi]) == z)
break;
}
- if (ozi == f->zbd_info->num_open_zones)
+ if (zi == f->zbd_info->num_write_zones)
return;
- dprint(FD_ZBD, "%s: closing zone %u\n",
+ dprint(FD_ZBD, "%s: removing zone %u from write zone array\n",
f->file_name, zbd_zone_idx(f, z));
- memmove(f->zbd_info->open_zones + ozi,
- f->zbd_info->open_zones + ozi + 1,
- (ZBD_MAX_OPEN_ZONES - (ozi + 1)) *
- sizeof(f->zbd_info->open_zones[0]));
+ memmove(f->zbd_info->write_zones + zi,
+ f->zbd_info->write_zones + zi + 1,
+ (ZBD_MAX_WRITE_ZONES - (zi + 1)) *
+ sizeof(f->zbd_info->write_zones[0]));
+
+ f->zbd_info->num_write_zones--;
+ td->num_write_zones--;
+ z->write = 0;
+}
+
+/**
+ * zbd_reset_zone - reset the write pointer of a single zone and remove the zone
+ * from the array of write zones.
+ * @td: FIO thread data.
+ * @f: FIO file associated with the disk for which to reset a write pointer.
+ * @z: Zone to reset.
+ *
+ * Returns 0 upon success and a negative error code upon failure.
+ *
+ * The caller must hold z->mutex.
+ */
+static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
+ struct fio_zone_info *z)
+{
+ int ret;
+
+ ret = __zbd_reset_zone(td, f, z);
+ if (ret)
+ return ret;
- f->zbd_info->num_open_zones--;
- td->num_open_zones--;
- z->open = 0;
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ zbd_write_zone_put(td, f, z);
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+ return 0;
+}
+
+/**
+ * zbd_finish_zone - finish the specified zone
+ * @td: FIO thread data.
+ * @f: FIO file for which to finish a zone
+ * @z: Zone to finish.
+ *
+ * Finish the zone @z, i.e. transition it from the open or closed condition to
+ * the full condition so that no further data can be written to it.
+ */
+static int zbd_finish_zone(struct thread_data *td, struct fio_file *f,
+ struct fio_zone_info *z)
+{
+ uint64_t offset = z->start;
+ uint64_t length = f->zbd_info->zone_size;
+ int ret = 0;
+
+ switch (f->zbd_info->model) {
+ case ZBD_HOST_AWARE:
+ case ZBD_HOST_MANAGED:
+ if (td->io_ops && td->io_ops->finish_zone)
+ ret = td->io_ops->finish_zone(td, f, offset, length);
+ else
+ ret = blkzoned_finish_zone(td, f, offset, length);
+ break;
+ default:
+ break;
+ }
+
+ if (ret < 0) {
+ td_verror(td, errno, "finish zone failed");
+ log_err("%s: finish zone at sector %"PRIu64" failed (%d).\n",
+ f->file_name, offset >> 9, errno);
+ } else {
+ z->wp = (z+1)->start;
+ }
+
+ return ret;
}
/**
const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
int res = 0;
- assert(min_bs);
+ if (fio_unlikely(0 == min_bs))
+ return 1;
dprint(FD_ZBD, "%s: examining zones %u .. %u\n",
f->file_name, zbd_zone_idx(f, zb), zbd_zone_idx(f, ze));
continue;
zone_lock(td, f, z);
- pthread_mutex_lock(&f->zbd_info->mutex);
- zbd_close_zone(td, f, z);
- pthread_mutex_unlock(&f->zbd_info->mutex);
if (z->wp != z->start) {
dprint(FD_ZBD, "%s: resetting zone %u\n",
}
/**
- * zbd_open_zone - Add a zone to the array of open zones.
+ * zbd_get_max_active_zones - Get the maximum number of active zones
+ * @td: FIO thread data
+ * @f: FIO file for which to get max active zones
+ *
+ * Returns the max_active_zones limit of the target file if it is available.
+ * Otherwise returns zero, which means no limit.
+ */
+static unsigned int zbd_get_max_active_zones(struct thread_data *td,
+ struct fio_file *f)
+{
+ unsigned int max_active_zones;
+ int ret;
+
+ if (td->io_ops && td->io_ops->get_max_active_zones)
+ ret = td->io_ops->get_max_active_zones(td, f,
+ &max_active_zones);
+ else
+ ret = blkzoned_get_max_active_zones(td, f, &max_active_zones);
+ if (ret < 0) {
+ dprint(FD_ZBD, "%s: max_active_zones is not available\n",
+ f->file_name);
+ return 0;
+ }
+
+ return max_active_zones;
+}
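+
+/*
+ * When the device reports a max_active_zones limit, closed zones also consume
+ * zone resources; zbd_setup_files() therefore counts closed zones against the
+ * write zone limit only when this limit is non-zero.
+ */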
+
+/**
+ * __zbd_write_zone_get - Add a zone to the array of write zones.
* @td: fio thread data.
- * @f: fio file that has the open zones to add.
+ * @f: fio file that holds the array of write zones.
- * @zone_idx: Index of the zone to add.
+ * @z: Zone to add.
*
- * Open a ZBD zone if it is not already open. Returns true if either the zone
- * was already open or if the zone was successfully added to the array of open
- * zones without exceeding the maximum number of open zones. Returns false if
- * the zone was not already open and opening the zone would cause the zone limit
- * to be exceeded.
+ * Do the same operation as zbd_write_zone_get(), except that it adds the zone
+ * @z to the write target zones array even when the zone does not have enough
+ * remaining space for a single block write.
*/
-static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
- struct fio_zone_info *z)
+static bool __zbd_write_zone_get(struct thread_data *td,
+ const struct fio_file *f,
+ struct fio_zone_info *z)
{
- const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
struct zoned_block_device_info *zbdi = f->zbd_info;
uint32_t zone_idx = zbd_zone_idx(f, z);
bool res = true;
* Skip full zones with data verification enabled because resetting a
* zone causes data loss and hence causes verification to fail.
*/
- if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
+ if (td->o.verify != VERIFY_NONE && zbd_zone_remainder(z) == 0)
return false;
/*
- * zbdi->max_open_zones == 0 means that there is no limit on the maximum
- * number of open zones. In this case, do no track open zones in
- * zbdi->open_zones array.
+ * zbdi->max_write_zones == 0 means that there is no limit on the
+ * maximum number of write target zones. In this case, do not track write
+ * target zones in zbdi->write_zones array.
*/
- if (!zbdi->max_open_zones)
+ if (!zbdi->max_write_zones)
return true;
pthread_mutex_lock(&zbdi->mutex);
- if (z->open) {
+ if (z->write) {
/*
* If the zone is going to be completely filled by writes
- * already in-flight, handle it as a full zone instead of an
- * open zone.
+ * already in-flight, handle it as a full zone instead of a
+ * write target zone.
*/
- if (z->wp >= zbd_zone_capacity_end(z))
+ if (!zbd_zone_remainder(z))
res = false;
goto out;
}
res = false;
/* Zero means no limit */
if (td->o.job_max_open_zones > 0 &&
- td->num_open_zones >= td->o.job_max_open_zones)
+ td->num_write_zones >= td->o.job_max_open_zones)
goto out;
- if (zbdi->num_open_zones >= zbdi->max_open_zones)
+ if (zbdi->num_write_zones >= zbdi->max_write_zones)
goto out;
- dprint(FD_ZBD, "%s: opening zone %u\n",
+ dprint(FD_ZBD, "%s: adding zone %u to write zone array\n",
f->file_name, zone_idx);
- zbdi->open_zones[zbdi->num_open_zones++] = zone_idx;
- td->num_open_zones++;
- z->open = 1;
+ zbdi->write_zones[zbdi->num_write_zones++] = zone_idx;
+ td->num_write_zones++;
+ z->write = 1;
res = true;
out:
return res;
}
-/* Verify whether direct I/O is used for all host-managed zoned drives. */
+/**
+ * zbd_write_zone_get - Add a zone to the array of write zones.
+ * @td: fio thread data.
+ * @f: fio file that holds the array of write zones.
+ * @z: Zone to add.
+ *
+ * Add a ZBD zone to the write target zones array if it is not yet added. Returns
+ * true if either the zone was already added or if the zone was successfully
+ * added to the array without exceeding the maximum number of write zones.
+ * Returns false if the zone was not already added and addition of the zone
+ * would cause the zone limit to be exceeded.
+ */
+static bool zbd_write_zone_get(struct thread_data *td, const struct fio_file *f,
+ struct fio_zone_info *z)
+{
+ const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
+
+ /*
+ * Skip full zones with data verification enabled because resetting a
+ * zone causes data loss and hence causes verification to fail.
+ */
+ if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
+ return false;
+
+ return __zbd_write_zone_get(td, f, z);
+}
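+
+/*
+ * zbd_write_zone_get() is what the write path calls before issuing a write;
+ * when it fails, zbd_convert_to_write_zone() selects another write target
+ * zone. __zbd_write_zone_get() is also called directly during file setup so
+ * that zones the device reports as open or closed are counted against the
+ * write zone limit even when no space remains in them.
+ */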
+
+/* Verify whether direct I/O is used for all host-managed zoned block drives. */
static bool zbd_using_direct_io(void)
{
- struct thread_data *td;
struct fio_file *f;
- int i, j;
+ int j;
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
continue;
for_each_file(td, f, j) {
- if (f->zbd_info &&
+ if (f->zbd_info && f->filetype == FIO_TYPE_BLOCK &&
f->zbd_info->model == ZBD_HOST_MANAGED)
return false;
}
- }
+ } end_for_each();
return true;
}
/* Whether or not the I/O range for f includes one or more sequential zones */
-static bool zbd_is_seq_job(struct fio_file *f)
+static bool zbd_is_seq_job(const struct fio_file *f)
{
uint32_t zone_idx, zone_idx_b, zone_idx_e;
{
const struct fio_zone_info *z;
uint64_t new_offset, new_end;
- uint32_t zone_idx;
if (!f->zbd_info)
return true;
return false;
}
- zone_idx = zbd_offset_to_zone_idx(f, f->file_offset);
- z = zbd_get_zone(f, zone_idx);
- if ((f->file_offset != z->start) &&
- (td->o.td_ddir != TD_DDIR_READ)) {
+ if (td->o.td_ddir == TD_DDIR_READ) {
+ z = zbd_offset_to_zone(f, f->file_offset + f->io_size);
+ new_end = z->start;
+ if (f->file_offset + f->io_size > new_end) {
+ log_info("%s: rounded io_size from %"PRIu64" to %"PRIu64"\n",
+ f->file_name, f->io_size,
+ new_end - f->file_offset);
+ f->io_size = new_end - f->file_offset;
+ }
+ return true;
+ }
+
+ z = zbd_offset_to_zone(f, f->file_offset);
+ if (f->file_offset != z->start) {
new_offset = zbd_zone_end(z);
if (new_offset >= f->file_offset + f->io_size) {
log_info("%s: io_size must be at least one zone\n",
f->file_offset = new_offset;
}
- zone_idx = zbd_offset_to_zone_idx(f, f->file_offset + f->io_size);
- z = zbd_get_zone(f, zone_idx);
+ z = zbd_offset_to_zone(f, f->file_offset + f->io_size);
new_end = z->start;
- if ((td->o.td_ddir != TD_DDIR_READ) &&
- (f->file_offset + f->io_size != new_end)) {
+ if (f->file_offset + f->io_size != new_end) {
if (new_end <= f->file_offset) {
log_info("%s: io_size must be at least one zone\n",
f->file_name);
*/
static bool zbd_verify_sizes(void)
{
- struct thread_data *td;
struct fio_file *f;
- int i, j;
+ int j;
- for_each_td(td, i) {
+ for_each_td(td) {
for_each_file(td, f, j) {
if (!zbd_zone_align_file_sizes(td, f))
return false;
}
- }
+ } end_for_each();
return true;
}
static bool zbd_verify_bs(void)
{
- struct thread_data *td;
struct fio_file *f;
- int i, j, k;
+ int j;
- for_each_td(td, i) {
+ for_each_td(td) {
if (td_trim(td) &&
(td->o.min_bs[DDIR_TRIM] != td->o.max_bs[DDIR_TRIM] ||
td->o.bssplit_nr[DDIR_TRIM])) {
zone_size);
return false;
}
- for (k = 0; k < FIO_ARRAY_SIZE(td->o.bs); k++) {
- if (td->o.verify != VERIFY_NONE &&
- zone_size % td->o.bs[k] != 0) {
- log_info("%s: block size %llu is not a divisor of the zone size %"PRIu64"\n",
- f->file_name, td->o.bs[k],
- zone_size);
- return false;
- }
- }
}
- }
+ } end_for_each();
return true;
}
int nr_zones, nrz;
struct zbd_zone *zones, *z;
struct fio_zone_info *p;
- uint64_t zone_size, offset;
+ uint64_t zone_size, offset, capacity;
+ bool same_zone_cap = true;
struct zoned_block_device_info *zbd_info = NULL;
int i, j, ret = -ENOMEM;
}
zone_size = zones[0].len;
+ capacity = zones[0].capacity;
nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
if (td->o.zone_size == 0) {
PTHREAD_MUTEX_RECURSIVE);
p->start = z->start;
p->capacity = z->capacity;
+ if (capacity != z->capacity)
+ same_zone_cap = false;
switch (z->cond) {
case ZBD_ZONE_COND_NOT_WP:
p->cond = z->cond;
if (j > 0 && p->start != p[-1].start + zone_size) {
- log_info("%s: invalid zone data\n",
- f->file_name);
+ log_info("%s: invalid zone data [%d:%d]: %"PRIu64" + %"PRIu64" != %"PRIu64"\n",
+ f->file_name, j, i,
+ p[-1].start, zone_size, p->start);
ret = -EINVAL;
goto out;
}
f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
ilog2(zone_size) : 0;
f->zbd_info->nr_zones = nr_zones;
+ f->zbd_info->max_active_zones = zbd_get_max_active_zones(td, f);
+
+ if (same_zone_cap)
+ dprint(FD_ZBD, "Zone capacity = %"PRIu64" KB\n",
+ capacity / 1024);
+
zbd_info = NULL;
ret = 0;
return ret;
}
-static int zbd_set_max_open_zones(struct thread_data *td, struct fio_file *f)
+static int zbd_set_max_write_zones(struct thread_data *td, struct fio_file *f)
{
struct zoned_block_device_info *zbd = f->zbd_info;
unsigned int max_open_zones;
if (zbd->model != ZBD_HOST_MANAGED || td->o.ignore_zone_limits) {
/* Only host-managed devices have a max open limit */
- zbd->max_open_zones = td->o.max_open_zones;
+ zbd->max_write_zones = td->o.max_open_zones;
goto out;
}
if (!max_open_zones) {
/* No device limit */
- zbd->max_open_zones = td->o.max_open_zones;
+ zbd->max_write_zones = td->o.max_open_zones;
} else if (!td->o.max_open_zones) {
/* No user limit. Set limit to device limit */
- zbd->max_open_zones = max_open_zones;
+ zbd->max_write_zones = max_open_zones;
} else if (td->o.max_open_zones <= max_open_zones) {
/* Both user limit and dev limit. User limit not too large */
- zbd->max_open_zones = td->o.max_open_zones;
+ zbd->max_write_zones = td->o.max_open_zones;
} else {
/* Both user limit and dev limit. User limit too large */
td_verror(td, EINVAL,
out:
/* Ensure that the limit is not larger than FIO's internal limit */
- if (zbd->max_open_zones > ZBD_MAX_OPEN_ZONES) {
+ if (zbd->max_write_zones > ZBD_MAX_WRITE_ZONES) {
td_verror(td, EINVAL, "'max_open_zones' value is too large");
log_err("'max_open_zones' value is larger than %u\n",
- ZBD_MAX_OPEN_ZONES);
+ ZBD_MAX_WRITE_ZONES);
return -EINVAL;
}
- dprint(FD_ZBD, "%s: using max open zones limit: %"PRIu32"\n",
- f->file_name, zbd->max_open_zones);
+ dprint(FD_ZBD, "%s: using max write zones limit: %"PRIu32"\n",
+ f->file_name, zbd->max_write_zones);
return 0;
}
assert(f->zbd_info);
f->zbd_info->model = zbd_model;
- ret = zbd_set_max_open_zones(td, f);
+ ret = zbd_set_max_write_zones(td, f);
if (ret) {
zbd_free_zone_info(f);
return ret;
*/
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
- struct thread_data *td2;
struct fio_file *f2;
- int i, j, ret;
+ int j, ret;
- for_each_td(td2, i) {
+ for_each_td(td2) {
for_each_file(td2, f2, j) {
if (td2 == td && f2 == file)
continue;
file->zbd_info->refcount++;
return 0;
}
- }
+ } end_for_each();
ret = zbd_create_zone_info(td, file);
if (ret < 0)
}
}
+static uint64_t zbd_verify_and_set_vdb(struct thread_data *td,
+ const struct fio_file *f)
+{
+ struct fio_zone_info *zb, *ze, *z;
+ uint64_t wp_vdb = 0;
+ struct zoned_block_device_info *zbdi = f->zbd_info;
+
+ assert(td->runstate < TD_RUNNING);
+ assert(zbdi);
+
+ if (!accounting_vdb(td, f))
+ return 0;
+
+ /*
+ * Ensure that the I/O range includes one or more sequential zones so
+ * that f->min_zone and f->max_zone have different values.
+ */
+ if (!zbd_is_seq_job(f))
+ return 0;
+
+ if (zbdi->write_min_zone != zbdi->write_max_zone) {
+ if (zbdi->write_min_zone != f->min_zone ||
+ zbdi->write_max_zone != f->max_zone) {
+ td_verror(td, EINVAL,
+ "multi-jobs with different write ranges are "
+ "not supported with zone_reset_threshold");
+ log_err("multi-jobs with different write ranges are "
+ "not supported with zone_reset_threshold\n");
+ }
+ return 0;
+ }
+
+ zbdi->write_min_zone = f->min_zone;
+ zbdi->write_max_zone = f->max_zone;
+
+ zb = zbd_get_zone(f, f->min_zone);
+ ze = zbd_get_zone(f, f->max_zone);
+ for (z = zb; z < ze; z++)
+ if (z->has_wp)
+ wp_vdb += z->wp - z->start;
+
+ zbdi->wp_valid_data_bytes = wp_vdb;
+
+ return wp_vdb;
+}
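+
+/*
+ * wp_valid_data_bytes feeds the zone_reset_threshold check in the write path:
+ * e.g. with zone_reset_threshold=0.5, a zone reset can be triggered once the
+ * valid data bytes behind the write pointers reach half of the file's io_size.
+ */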
+
int zbd_setup_files(struct thread_data *td)
{
struct fio_file *f;
if (!zbd_verify_bs())
return 1;
+ if (td->o.experimental_verify) {
+ log_err("zonemode=zbd does not support experimental verify\n");
+ return 1;
+ }
+
for_each_file(td, f, i) {
struct zoned_block_device_info *zbd = f->zbd_info;
struct fio_zone_info *z;
int zi;
+ uint64_t vdb;
assert(zbd);
f->max_zone =
zbd_offset_to_zone_idx(f, f->file_offset + f->io_size);
+ vdb = zbd_verify_and_set_vdb(td, f);
+
+ dprint(FD_ZBD, "%s(%s): valid data bytes = %" PRIu64 "\n",
+ __func__, f->file_name, vdb);
+
/*
* When all zones in the I/O range are conventional, io_size
* can be smaller than zone size, making min_zone the same
assert(f->min_zone < f->max_zone);
if (td->o.max_open_zones > 0 &&
- zbd->max_open_zones != td->o.max_open_zones) {
+ zbd->max_write_zones != td->o.max_open_zones) {
log_err("Different 'max_open_zones' values\n");
return 1;
}
* global max open zones limit. (As the tracking of open zones
* is disabled when there is no global max open zones limit.)
*/
- if (td->o.job_max_open_zones && !zbd->max_open_zones) {
+ if (td->o.job_max_open_zones && !zbd->max_write_zones) {
log_err("'job_max_open_zones' cannot be used without a global open zones limit\n");
return 1;
}
/*
- * zbd->max_open_zones is the global limit shared for all jobs
+ * zbd->max_write_zones is the global limit shared for all jobs
* that target the same zoned block device. Force sync the per
* thread global limit with the actual global limit. (The real
* per thread/job limit is stored in td->o.job_max_open_zones).
*/
- td->o.max_open_zones = zbd->max_open_zones;
+ td->o.max_open_zones = zbd->max_write_zones;
for (zi = f->min_zone; zi < f->max_zone; zi++) {
z = &zbd->zone_info[zi];
if (z->cond != ZBD_ZONE_COND_IMP_OPEN &&
- z->cond != ZBD_ZONE_COND_EXP_OPEN)
+ z->cond != ZBD_ZONE_COND_EXP_OPEN &&
+ z->cond != ZBD_ZONE_COND_CLOSED)
+ continue;
+ if (!zbd->max_active_zones &&
+ z->cond == ZBD_ZONE_COND_CLOSED)
continue;
- if (zbd_open_zone(td, f, z))
+ if (__zbd_write_zone_get(td, f, z))
continue;
/*
* If the number of open zones exceeds specified limits,
- * reset all extra open zones.
+ * error out.
*/
- if (zbd_reset_zone(td, f, z) < 0) {
- log_err("Failed to reest zone %d\n", zi);
- return 1;
- }
+ log_err("Number of open zones exceeds max_open_zones limit\n");
+ return 1;
}
}
return write_cnt == 0;
}
-enum swd_action {
- CHECK_SWD,
- SET_SWD,
-};
-
-/* Calculate the number of sectors with data (swd) and perform action 'a' */
-static uint64_t zbd_process_swd(struct thread_data *td,
- const struct fio_file *f, enum swd_action a)
-{
- struct fio_zone_info *zb, *ze, *z;
- uint64_t swd = 0;
- uint64_t wp_swd = 0;
-
- zb = zbd_get_zone(f, f->min_zone);
- ze = zbd_get_zone(f, f->max_zone);
- for (z = zb; z < ze; z++) {
- if (z->has_wp) {
- zone_lock(td, f, z);
- wp_swd += z->wp - z->start;
- }
- swd += z->wp - z->start;
- }
-
- pthread_mutex_lock(&f->zbd_info->mutex);
- switch (a) {
- case CHECK_SWD:
- assert(f->zbd_info->sectors_with_data == swd);
- assert(f->zbd_info->wp_sectors_with_data == wp_swd);
- break;
- case SET_SWD:
- f->zbd_info->sectors_with_data = swd;
- f->zbd_info->wp_sectors_with_data = wp_swd;
- break;
- }
- pthread_mutex_unlock(&f->zbd_info->mutex);
-
- for (z = zb; z < ze; z++)
- if (z->has_wp)
- zone_unlock(z);
-
- return swd;
-}
-
-/*
- * The swd check is useful for debugging but takes too much time to leave
- * it enabled all the time. Hence it is disabled by default.
- */
-static const bool enable_check_swd = false;
-
-/* Check whether the values of zbd_info.*sectors_with_data are correct. */
-static void zbd_check_swd(struct thread_data *td, const struct fio_file *f)
-{
- if (!enable_check_swd)
- return;
-
- zbd_process_swd(td, f, CHECK_SWD);
-}
-
void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
struct fio_zone_info *zb, *ze;
- uint64_t swd;
+ bool verify_data_left = false;
if (!f->zbd_info || !td_write(td))
return;
zb = zbd_get_zone(f, f->min_zone);
ze = zbd_get_zone(f, f->max_zone);
- swd = zbd_process_swd(td, f, SET_SWD);
-
- dprint(FD_ZBD, "%s(%s): swd = %" PRIu64 "\n",
- __func__, f->file_name, swd);
/*
* If data verification is enabled reset the affected zones before
* writing any data to avoid that a zone reset has to be issued while
* writing data, which causes data loss.
*/
- if (td->o.verify != VERIFY_NONE && td->runstate != TD_VERIFYING)
- zbd_reset_zones(td, f, zb, ze);
+ if (td->o.verify != VERIFY_NONE) {
+ verify_data_left = td->runstate == TD_VERIFYING ||
+ td->io_hist_len || td->verify_batch;
+ if (!verify_data_left)
+ zbd_reset_zones(td, f, zb, ze);
+ }
+
zbd_reset_write_cnt(td, f);
}
-/* Return random zone index for one of the open zones. */
+/* Return random zone index for one of the write target zones. */
static uint32_t pick_random_zone_idx(const struct fio_file *f,
const struct io_u *io_u)
{
return (io_u->offset - f->file_offset) *
- f->zbd_info->num_open_zones / f->io_size;
+ f->zbd_info->num_write_zones / f->io_size;
}
static bool any_io_in_flight(void)
{
- struct thread_data *td;
- int i;
-
- for_each_td(td, i) {
+ for_each_td(td) {
if (td->io_u_in_flight)
return true;
- }
+ } end_for_each();
return false;
}
/*
- * Modify the offset of an I/O unit that does not refer to an open zone such
- * that it refers to an open zone. Close an open zone and open a new zone if
- * necessary. The open zone is searched across sequential zones.
+ * Modify the offset of an I/O unit that does not refer to a zone in the write
+ * target zones array such that it refers to a zone in that array. Add a zone
+ * to or remove a zone from the array if necessary. The write target zone is
+ * searched across sequential zones.
* This algorithm can only work correctly if all write pointers are
* a multiple of the fio block size. The caller must neither hold z->mutex
* nor f->zbd_info->mutex. Returns with z->mutex held upon success.
*/
-static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
- struct io_u *io_u)
+static struct fio_zone_info *zbd_convert_to_write_zone(struct thread_data *td,
+ struct io_u *io_u)
{
const uint64_t min_bs = td->o.min_bs[io_u->ddir];
struct fio_file *f = io_u->file;
struct zoned_block_device_info *zbdi = f->zbd_info;
struct fio_zone_info *z;
- unsigned int open_zone_idx = -1;
+ unsigned int write_zone_idx = -1;
uint32_t zone_idx, new_zone_idx;
int i;
- bool wait_zone_close;
+ bool wait_zone_write;
bool in_flight;
bool should_retry = true;
assert(is_valid_offset(f, io_u->offset));
- if (zbdi->max_open_zones || td->o.job_max_open_zones) {
+ if (zbdi->max_write_zones || td->o.job_max_open_zones) {
/*
- * This statement accesses zbdi->open_zones[] on purpose
+ * This statement accesses zbdi->write_zones[] on purpose
* without locking.
*/
- zone_idx = zbdi->open_zones[pick_random_zone_idx(f, io_u)];
+ zone_idx = zbdi->write_zones[pick_random_zone_idx(f, io_u)];
} else {
zone_idx = zbd_offset_to_zone_idx(f, io_u->offset);
}
if (z->has_wp) {
if (z->cond != ZBD_ZONE_COND_OFFLINE &&
- zbdi->max_open_zones == 0 &&
+ zbdi->max_write_zones == 0 &&
td->o.job_max_open_zones == 0)
goto examine_zone;
- if (zbdi->num_open_zones == 0) {
- dprint(FD_ZBD, "%s(%s): no zones are open\n",
+ if (zbdi->num_write_zones == 0) {
+ dprint(FD_ZBD, "%s(%s): no zone is write target\n",
__func__, f->file_name);
- goto open_other_zone;
+ goto choose_other_zone;
}
}
/*
- * List of opened zones is per-device, shared across all
+ * Array of write target zones is per-device, shared across all
* threads. Start with quasi-random candidate zone. Ignore
* zones which don't belong to thread's offset/size area.
*/
- open_zone_idx = pick_random_zone_idx(f, io_u);
- assert(!open_zone_idx ||
- open_zone_idx < zbdi->num_open_zones);
- tmp_idx = open_zone_idx;
+ write_zone_idx = pick_random_zone_idx(f, io_u);
+ assert(!write_zone_idx ||
+ write_zone_idx < zbdi->num_write_zones);
+ tmp_idx = write_zone_idx;
- for (i = 0; i < zbdi->num_open_zones; i++) {
+ for (i = 0; i < zbdi->num_write_zones; i++) {
uint32_t tmpz;
- if (tmp_idx >= zbdi->num_open_zones)
+ if (tmp_idx >= zbdi->num_write_zones)
tmp_idx = 0;
- tmpz = zbdi->open_zones[tmp_idx];
+ tmpz = zbdi->write_zones[tmp_idx];
if (f->min_zone <= tmpz && tmpz < f->max_zone) {
- open_zone_idx = tmp_idx;
+ write_zone_idx = tmp_idx;
goto found_candidate_zone;
}
return NULL;
found_candidate_zone:
- new_zone_idx = zbdi->open_zones[open_zone_idx];
+ new_zone_idx = zbdi->write_zones[write_zone_idx];
if (new_zone_idx == zone_idx)
break;
zone_idx = new_zone_idx;
/* Both z->mutex and zbdi->mutex are held. */
examine_zone:
- if (z->wp + min_bs <= zbd_zone_capacity_end(z)) {
+ if (zbd_zone_remainder(z) >= min_bs) {
pthread_mutex_unlock(&zbdi->mutex);
goto out;
}
-open_other_zone:
- /* Check if number of open zones reaches one of limits. */
- wait_zone_close =
- zbdi->num_open_zones == f->max_zone - f->min_zone ||
- (zbdi->max_open_zones &&
- zbdi->num_open_zones == zbdi->max_open_zones) ||
+choose_other_zone:
+ /* Check if number of write target zones reaches one of limits. */
+ wait_zone_write =
+ zbdi->num_write_zones == f->max_zone - f->min_zone ||
+ (zbdi->max_write_zones &&
+ zbdi->num_write_zones == zbdi->max_write_zones) ||
(td->o.job_max_open_zones &&
- td->num_open_zones == td->o.job_max_open_zones);
+ td->num_write_zones == td->o.job_max_open_zones);
pthread_mutex_unlock(&zbdi->mutex);
/* Only z->mutex is held. */
/*
- * When number of open zones reaches to one of limits, wait for
- * zone close before opening a new zone.
+ * When the number of write target zones reaches one of the limits, wait for
+ * writes to one of them to complete before trying a new zone.
*/
- if (wait_zone_close) {
+ if (wait_zone_write) {
dprint(FD_ZBD,
- "%s(%s): quiesce to allow open zones to close\n",
+ "%s(%s): quiesce to remove a zone from write target zones array\n",
__func__, f->file_name);
io_u_quiesce(td);
}
retry:
- /* Zone 'z' is full, so try to open a new zone. */
+ /* Zone 'z' is full, so try to choose a new zone. */
for (i = f->io_size / zbdi->zone_size; i > 0; i--) {
zone_idx++;
if (z->has_wp)
if (!z->has_wp)
continue;
zone_lock(td, f, z);
- if (z->open)
+ if (z->write)
continue;
- if (zbd_open_zone(td, f, z))
+ if (zbd_write_zone_get(td, f, z))
goto out;
}
/* Only z->mutex is held. */
- /* Check whether the write fits in any of the already opened zones. */
+ /* Check whether the write fits in any of the write target zones. */
pthread_mutex_lock(&zbdi->mutex);
- for (i = 0; i < zbdi->num_open_zones; i++) {
- zone_idx = zbdi->open_zones[i];
+ for (i = 0; i < zbdi->num_write_zones; i++) {
+ zone_idx = zbdi->write_zones[i];
if (zone_idx < f->min_zone || zone_idx >= f->max_zone)
continue;
pthread_mutex_unlock(&zbdi->mutex);
z = zbd_get_zone(f, zone_idx);
zone_lock(td, f, z);
- if (z->wp + min_bs <= zbd_zone_capacity_end(z))
+ if (zbd_zone_remainder(z) >= min_bs)
goto out;
pthread_mutex_lock(&zbdi->mutex);
}
/*
* When any I/O is in-flight or when all I/Os in-flight get completed,
- * the I/Os might have closed zones then retry the steps to open a zone.
- * Before retry, call io_u_quiesce() to complete in-flight writes.
+ * the I/Os might have removed zones from the write target array, so
+ * retry the steps to choose a zone. Before retrying, call io_u_quiesce()
+ * to complete in-flight writes.
*/
in_flight = any_io_in_flight();
if (in_flight || should_retry) {
dprint(FD_ZBD,
- "%s(%s): wait zone close and retry open zones\n",
+ "%s(%s): wait zone write and retry write target zone selection\n",
__func__, f->file_name);
+ should_retry = in_flight;
pthread_mutex_unlock(&zbdi->mutex);
zone_unlock(z);
io_u_quiesce(td);
zone_lock(td, f, z);
- should_retry = in_flight;
goto retry;
}
zone_unlock(z);
- dprint(FD_ZBD, "%s(%s): did not open another zone\n",
+ dprint(FD_ZBD, "%s(%s): did not choose another write zone\n",
__func__, f->file_name);
return NULL;
return z;
}
-/* The caller must hold z->mutex. */
-static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
- struct io_u *io_u,
- struct fio_zone_info *z)
-{
- const struct fio_file *f = io_u->file;
- const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
-
- if (!zbd_open_zone(td, f, z)) {
- zone_unlock(z);
- z = zbd_convert_to_open_zone(td, io_u);
- assert(z);
- }
-
- if (z->verify_block * min_bs >= z->capacity) {
- log_err("%s: %d * %"PRIu64" >= %"PRIu64"\n",
- f->file_name, z->verify_block, min_bs, z->capacity);
- /*
- * If the assertion below fails during a test run, adding
- * "--experimental_verify=1" to the command line may help.
- */
- assert(false);
- }
-
- io_u->offset = z->start + z->verify_block * min_bs;
- if (io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
- log_err("%s: %llu + %llu >= %"PRIu64"\n",
- f->file_name, io_u->offset, io_u->buflen,
- zbd_zone_capacity_end(z));
- assert(false);
- }
- z->verify_block += io_u->buflen / min_bs;
-
- return z;
-}
-
/*
* Find another zone which has @min_bytes of readable data. Search in zones
* @zb + 1 .. @zl. For random workload, also search in zones @zb - 1 .. @zf.
* @io_u: I/O unit
* @z: zone info pointer
*
- * If the write command made the zone full, close it.
+ * If the write command made the zone full, remove it from the write target
+ * zones array.
*
* The caller must hold z->mutex.
*/
if (io_u->ddir == DDIR_WRITE &&
io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
pthread_mutex_lock(&f->zbd_info->mutex);
- zbd_close_zone(td, f, z);
+ zbd_write_zone_put(td, f, z);
pthread_mutex_unlock(&f->zbd_info->mutex);
}
}
const struct fio_file *f = io_u->file;
struct zoned_block_device_info *zbd_info = f->zbd_info;
struct fio_zone_info *z;
- uint32_t zone_idx;
uint64_t zone_end;
assert(zbd_info);
- zone_idx = zbd_offset_to_zone_idx(f, io_u->offset);
- assert(zone_idx < zbd_info->nr_zones);
- z = zbd_get_zone(f, zone_idx);
-
+ z = zbd_offset_to_zone(f, io_u->offset);
assert(z->has_wp);
if (!success)
dprint(FD_ZBD,
"%s: queued I/O (%lld, %llu) for zone %u\n",
- f->file_name, io_u->offset, io_u->buflen, zone_idx);
+ f->file_name, io_u->offset, io_u->buflen, zbd_zone_idx(f, z));
switch (io_u->ddir) {
case DDIR_WRITE:
* z->wp > zone_end means that one or more I/O errors
* have occurred.
*/
- pthread_mutex_lock(&zbd_info->mutex);
- if (z->wp <= zone_end) {
- zbd_info->sectors_with_data += zone_end - z->wp;
- zbd_info->wp_sectors_with_data += zone_end - z->wp;
+ if (accounting_vdb(td, f) && z->wp <= zone_end) {
+ pthread_mutex_lock(&zbd_info->mutex);
+ zbd_info->wp_valid_data_bytes += zone_end - z->wp;
+ pthread_mutex_unlock(&zbd_info->mutex);
}
- pthread_mutex_unlock(&zbd_info->mutex);
z->wp = zone_end;
break;
default:
static void zbd_put_io(struct thread_data *td, const struct io_u *io_u)
{
const struct fio_file *f = io_u->file;
- struct zoned_block_device_info *zbd_info = f->zbd_info;
struct fio_zone_info *z;
- uint32_t zone_idx;
-
- assert(zbd_info);
- zone_idx = zbd_offset_to_zone_idx(f, io_u->offset);
- assert(zone_idx < zbd_info->nr_zones);
- z = zbd_get_zone(f, zone_idx);
+ assert(f->zbd_info);
+ z = zbd_offset_to_zone(f, io_u->offset);
assert(z->has_wp);
dprint(FD_ZBD,
"%s: terminate I/O (%lld, %llu) for zone %u\n",
- f->file_name, io_u->offset, io_u->buflen, zone_idx);
+ f->file_name, io_u->offset, io_u->buflen, zbd_zone_idx(f, z));
zbd_end_zone_io(td, io_u, z);
zone_unlock(z);
- zbd_check_swd(td, f);
}
/*
struct fio_file *f = io_u->file;
enum fio_ddir ddir = io_u->ddir;
struct fio_zone_info *z;
- uint32_t zone_idx;
assert(td->o.zone_mode == ZONE_MODE_ZBD);
assert(td->o.zone_size);
assert(f->zbd_info);
- zone_idx = zbd_offset_to_zone_idx(f, f->last_pos[ddir]);
- z = zbd_get_zone(f, zone_idx);
+ z = zbd_offset_to_zone(f, f->last_pos[ddir]);
/*
* When the zone capacity is smaller than the zone size and the I/O is
"%s: Jump from zone capacity limit to zone end:"
" (%"PRIu64" -> %"PRIu64") for zone %u (%"PRIu64")\n",
f->file_name, f->last_pos[ddir],
- zbd_zone_end(z), zone_idx, z->capacity);
+ zbd_zone_end(z), zbd_zone_idx(f, z), z->capacity);
td->io_skip_bytes += zbd_zone_end(z) - f->last_pos[ddir];
f->last_pos[ddir] = zbd_zone_end(z);
}
if (ddir != DDIR_READ || !td_rw(td))
return ddir;
- if (io_u->file->zbd_info->sectors_with_data ||
- td->o.read_beyond_wp)
+ if (io_u->file->last_start[DDIR_WRITE] != -1ULL ||
+ td->o.read_beyond_wp || td->o.rwmix[DDIR_WRITE] == 0)
return DDIR_READ;
return DDIR_WRITE;
{
struct fio_file *f = io_u->file;
struct zoned_block_device_info *zbdi = f->zbd_info;
- uint32_t zone_idx_b;
struct fio_zone_info *zb, *zl, *orig_zb;
uint32_t orig_len = io_u->buflen;
uint64_t min_bs = td->o.min_bs[io_u->ddir];
assert(is_valid_offset(f, io_u->offset));
assert(io_u->buflen);
- zone_idx_b = zbd_offset_to_zone_idx(f, io_u->offset);
- zb = zbd_get_zone(f, zone_idx_b);
+ zb = zbd_offset_to_zone(f, io_u->offset);
orig_zb = zb;
if (!zb->has_wp) {
io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
return io_u_accept;
- zbd_check_swd(td, f);
-
zone_lock(td, f, zb);
switch (io_u->ddir) {
case DDIR_READ:
- if (td->runstate == TD_VERIFYING && td_write(td)) {
- zb = zbd_replay_write_order(td, io_u, zb);
+ if (td->runstate == TD_VERIFYING && td_write(td))
goto accept;
- }
/*
* Check that there is enough written data in the zone to do an
goto eof;
}
- if (!zbd_open_zone(td, f, zb)) {
+retry:
+ if (zbd_zone_remainder(zb) > 0 &&
+ zbd_zone_remainder(zb) < min_bs) {
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ zbd_write_zone_put(td, f, zb);
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+ dprint(FD_ZBD,
+ "%s: finish zone %d\n",
+ f->file_name, zbd_zone_idx(f, zb));
+ io_u_quiesce(td);
+ zbd_finish_zone(td, f, zb);
+ if (zbd_zone_idx(f, zb) + 1 >= f->max_zone) {
+ if (!td_random(td))
+ goto eof;
+ }
zone_unlock(zb);
- zb = zbd_convert_to_open_zone(td, io_u);
+
+ /* Find the next write pointer zone */
+ do {
+ zb++;
+ if (zbd_zone_idx(f, zb) >= f->max_zone)
+ zb = zbd_get_zone(f, f->min_zone);
+ } while (!zb->has_wp);
+
+ zone_lock(td, f, zb);
+ }
+
+ if (!zbd_write_zone_get(td, f, zb)) {
+ zone_unlock(zb);
+ zb = zbd_convert_to_write_zone(td, io_u);
if (!zb) {
- dprint(FD_IO, "%s: can't convert to open zone",
+ dprint(FD_IO, "%s: can't convert to write target zone",
f->file_name);
goto eof;
}
}
+ if (zbd_zone_remainder(zb) > 0 &&
+ zbd_zone_remainder(zb) < min_bs)
+ goto retry;
+
/* Check whether the zone reset threshold has been exceeded */
if (td->o.zrf.u.f) {
- if (zbdi->wp_sectors_with_data >= f->io_size * td->o.zrt.u.f &&
+ if (zbdi->wp_valid_data_bytes >=
+ f->io_size * td->o.zrt.u.f &&
zbd_dec_and_reset_write_cnt(td, f))
zb->reset_zone = 1;
}
/* Reset the zone pointer if necessary */
if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
- assert(td->o.verify == VERIFY_NONE);
+ if (td->o.verify != VERIFY_NONE) {
+ /*
+ * Unset io_u->file to tell get_next_verify()
+ * that this I/O is not a requeue.
+ */
+ io_u->file = NULL;
+ if (!get_next_verify(td, io_u)) {
+ zone_unlock(zb);
+ return io_u_accept;
+ }
+ io_u->file = f;
+ }
+
/*
* Since previous write requests may have been submitted
* asynchronously and since we will submit the zone
*/
io_u_quiesce(td);
zb->reset_zone = 0;
- if (zbd_reset_zone(td, f, zb) < 0)
+ if (__zbd_reset_zone(td, f, zb) < 0)
goto eof;
if (zb->capacity < min_bs) {
case DDIR_WAIT:
case DDIR_LAST:
case DDIR_INVAL:
+ case DDIR_TIMEOUT:
goto accept;
}
* Return io_u_completed when reset zone succeeds. Return 0 when the target zone
* does not have write pointer. On error, return negative errno.
*/
-int zbd_do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
+int zbd_do_io_u_trim(struct thread_data *td, struct io_u *io_u)
{
struct fio_file *f = io_u->file;
struct fio_zone_info *z;
- uint32_t zone_idx;
int ret;
- zone_idx = zbd_offset_to_zone_idx(f, io_u->offset);
- z = zbd_get_zone(f, zone_idx);
-
+ z = zbd_offset_to_zone(f, io_u->offset);
if (!z->has_wp)
return 0;
return io_u_completed;
}
+
+void zbd_log_err(const struct thread_data *td, const struct io_u *io_u)
+{
+ const struct fio_file *f = io_u->file;
+
+ if (td->o.zone_mode != ZONE_MODE_ZBD)
+ return;
+
+ if (io_u->error == EOVERFLOW)
+ log_err("%s: Exceeded max_active_zones limit. Check conditions of zones out of I/O ranges.\n",
+ f->file_name);
+}