pthread_mutex_unlock(&f->zbd_info->mutex);
z->wp = z->start;
- z->verify_block = 0;
td->ts.nr_zone_resets++;
z->open = 0;
}
+/**
+ * zbd_finish_zone - finish the specified zone
+ * @td: FIO thread data.
+ * @f: FIO file for which to finish a zone.
+ * @z: Zone to finish.
+ *
+ * Transition the zone at @z->start from the open or closed condition to
+ * the full condition by moving the zone's write pointer to the end of the
+ * zone.
+ */
+static int zbd_finish_zone(struct thread_data *td, struct fio_file *f,
+ struct fio_zone_info *z)
+{
+ uint64_t offset = z->start;
+ uint64_t length = f->zbd_info->zone_size;
+ int ret = 0;
+
+ switch (f->zbd_info->model) {
+ case ZBD_HOST_AWARE:
+ case ZBD_HOST_MANAGED:
+ if (td->io_ops && td->io_ops->finish_zone)
+ ret = td->io_ops->finish_zone(td, f, offset, length);
+ else
+ ret = blkzoned_finish_zone(td, f, offset, length);
+ break;
+ default:
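+ /* No zone-finish operation exists for conventional (non-zoned) models. */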
+ break;
+ }
+
+ if (ret < 0) {
+ td_verror(td, errno, "finish zone failed");
+ log_err("%s: finish zone at sector %"PRIu64" failed (%d).\n",
+ f->file_name, offset >> 9, errno);
+ } else {
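+ /*
+ * The zone is now full: advance the software write pointer to
+ * the end of the zone, i.e. the start of the next zone.
+ */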
+ z->wp = (z+1)->start;
+ }
+
+ return ret;
+}
+
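+/*
+ * Illustrative sketch only: on Linux, blkzoned_finish_zone() is expected
+ * to reduce to the BLKFINISHZONE ioctl from <linux/blkzoned.h>, which
+ * takes the zone range in 512-byte sectors. The helper below is
+ * hypothetical, not part of this change; it assumes an already-open raw
+ * block device file descriptor @fd plus <sys/ioctl.h> and <errno.h>.
+ */
+static inline int example_finish_zone(int fd, uint64_t offset,
+ uint64_t length)
+{
+ struct blk_zone_range zr = {
+ .sector = offset >> 9,
+ .nr_sectors = length >> 9,
+ };
+
+ /* Ask the device to move the zone's write pointer to the zone end. */
+ return ioctl(fd, BLKFINISHZONE, &zr) == -1 ? -errno : 0;
+}
+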
/**
* zbd_reset_zones - Reset a range of zones.
* @td: fio thread data.
{
struct thread_data *td;
struct fio_file *f;
- int i, j, k;
+ int i, j;
for_each_td(td, i) {
if (td_trim(td) &&
zone_size);
return false;
}
- for (k = 0; k < FIO_ARRAY_SIZE(td->o.bs); k++) {
- if (td->o.verify != VERIFY_NONE &&
- zone_size % td->o.bs[k] != 0) {
- log_info("%s: block size %llu is not a divisor of the zone size %"PRIu64"\n",
- f->file_name, td->o.bs[k],
- zone_size);
- return false;
- }
- }
}
}
return true;
if (!zbd_verify_bs())
return 1;
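+ /*
+ * Verification with zonemode=zbd relies on the recorded write I/O
+ * history (see get_next_verify()). Experimental verify regenerates
+ * offsets from the random seeds instead of recording them, so it
+ * cannot be supported here.
+ */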
+ if (td->o.experimental_verify) {
+ log_err("zonemode=zbd does not support experimental verify\n");
+ return 1;
+ }
+
for_each_file(td, f, i) {
struct zoned_block_device_info *zbd = f->zbd_info;
struct fio_zone_info *z;
{
struct fio_zone_info *zb, *ze;
uint64_t swd;
+ bool verify_data_left = false;
if (!f->zbd_info || !td_write(td))
return;
* writing any data, to avoid having to issue a zone reset while data is
* being written, which would cause data loss.
*/
- if (td->o.verify != VERIFY_NONE && td->runstate != TD_VERIFYING)
- zbd_reset_zones(td, f, zb, ze);
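+ /*
+ * Skip the reset while verify data is still pending: verification
+ * reads back written data that a reset would wipe. With
+ * verify_backlog, verifies are issued every verify_backlog writes,
+ * so a non-zero io_hist_len % verify_backlog means that unverified
+ * writes remain.
+ */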
+ if (td->o.verify != VERIFY_NONE) {
+ verify_data_left = td->runstate == TD_VERIFYING ||
+ td->io_hist_len || td->verify_batch;
+ if (td->io_hist_len && td->o.verify_backlog)
+ verify_data_left =
+ td->io_hist_len % td->o.verify_backlog;
+ if (!verify_data_left)
+ zbd_reset_zones(td, f, zb, ze);
+ }
+
zbd_reset_write_cnt(td, f);
}
return z;
}
-/* The caller must hold z->mutex. */
-static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
- struct io_u *io_u,
- struct fio_zone_info *z)
-{
- const struct fio_file *f = io_u->file;
- const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
-
- if (!zbd_open_zone(td, f, z)) {
- zone_unlock(z);
- z = zbd_convert_to_open_zone(td, io_u);
- assert(z);
- }
-
- if (z->verify_block * min_bs >= z->capacity) {
- log_err("%s: %d * %"PRIu64" >= %"PRIu64"\n",
- f->file_name, z->verify_block, min_bs, z->capacity);
- /*
- * If the assertion below fails during a test run, adding
- * "--experimental_verify=1" to the command line may help.
- */
- assert(false);
- }
-
- io_u->offset = z->start + z->verify_block * min_bs;
- if (io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
- log_err("%s: %llu + %llu >= %"PRIu64"\n",
- f->file_name, io_u->offset, io_u->buflen,
- zbd_zone_capacity_end(z));
- assert(false);
- }
- z->verify_block += io_u->buflen / min_bs;
-
- return z;
-}
-
/*
* Find another zone which has @min_bytes of readable data. Search in zones
* @zb + 1 .. @zl. For a random workload, also search in zones @zb - 1 .. @zf.
switch (io_u->ddir) {
case DDIR_READ:
- if (td->runstate == TD_VERIFYING && td_write(td)) {
- zb = zbd_replay_write_order(td, io_u, zb);
+ if (td->runstate == TD_VERIFYING && td_write(td))
goto accept;
- }
/*
* Check that there is enough written data in the zone to do an
goto eof;
}
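+ /*
+ * If the remaining capacity of the zone is non-zero but smaller than
+ * the minimum block size, no full block fits anymore: close and finish
+ * the zone, then move on to the next write pointer zone.
+ */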
+retry:
+ if (zbd_zone_remainder(zb) > 0 &&
+ zbd_zone_remainder(zb) < min_bs) {
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ zbd_close_zone(td, f, zb);
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+ dprint(FD_ZBD, "%s: finish zone %d\n",
+ f->file_name, zbd_zone_idx(f, zb));
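+ /*
+ * Drain in-flight writes before issuing the zone finish so that the
+ * device-side write pointer is stable when it is moved to the end of
+ * the zone.
+ */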
+ io_u_quiesce(td);
+ zbd_finish_zone(td, f, zb);
+ if (zbd_zone_idx(f, zb) + 1 >= f->max_zone && !td_random(td))
+ goto eof;
+ zone_unlock(zb);
+
+ /* Find the next write pointer zone */
+ do {
+ zb++;
+ if (zbd_zone_idx(f, zb) >= f->max_zone)
+ zb = zbd_get_zone(f, f->min_zone);
+ } while (!zb->has_wp);
+
+ zone_lock(td, f, zb);
+ }
+
if (!zbd_open_zone(td, f, zb)) {
zone_unlock(zb);
zb = zbd_convert_to_open_zone(td, io_u);
}
}
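+ /*
+ * The zone we moved to, or were handed by zbd_convert_to_open_zone(),
+ * may itself have less than a full block of capacity left; if so,
+ * finish it as well before writing.
+ */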
+ if (zbd_zone_remainder(zb) > 0 &&
+ zbd_zone_remainder(zb) < min_bs)
+ goto retry;
+
/* Check whether the zone reset threshold has been exceeded */
if (td->o.zrf.u.f) {
if (zbdi->wp_sectors_with_data >= f->io_size * td->o.zrt.u.f &&
/* Reset the zone pointer if necessary */
if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
- assert(td->o.verify == VERIFY_NONE);
+ if (td->o.verify != VERIFY_NONE) {
+ /*
+ * Unset io_u->file to tell get_next_verify() that this
+ * I/O is not a requeue.
+ */
+ io_u->file = NULL;
+ if (!get_next_verify(td, io_u)) {
+ zone_unlock(zb);
+ return io_u_accept;
+ }
+ io_u->file = f;
+ }
+
/*
* Since previous write requests may have been submitted
* asynchronously and since we will submit the zone