z->wp + required > zbd_zone_capacity_end(z);
}
-static void zone_lock(struct thread_data *td, struct fio_file *f, struct fio_zone_info *z)
+static void zone_lock(struct thread_data *td, const struct fio_file *f,
+ struct fio_zone_info *z)
{
struct zoned_block_device_info *zbd = f->zbd_info;
uint32_t nz = z - zbd->zone_info;
/* A thread should never lock zones outside its working area. */
assert(f->min_zone <= nz && nz < f->max_zone);
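+ /*
+ * Conventional zones have no write pointer to serialize against, so
+ * only zones with a write pointer are ever locked.
+ */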
+ assert(z->has_wp);
+
/*
* Lock the io_u target zone. The zone will be unlocked if io_u offset
* is changed or when io_u completes and zbd_put_io() is executed.
{
int ret;
+ assert(z->has_wp);
ret = pthread_mutex_unlock(&z->mutex);
assert(!ret);
}
return false;
}
- if (td->o.zone_skip &&
- (td->o.zone_skip < td->o.zone_size ||
- td->o.zone_skip % td->o.zone_size)) {
+ if (td->o.zone_skip % td->o.zone_size) {
log_err("%s: zoneskip %llu is not a multiple of the device zone size %llu.\n",
f->file_name, (unsigned long long) td->o.zone_skip,
(unsigned long long) td->o.zone_size);
(unsigned long long) new_end - f->file_offset);
f->io_size = new_end - f->file_offset;
}
-
- f->min_zone = zbd_zone_idx(f, f->file_offset);
- f->max_zone = zbd_zone_idx(f, f->file_offset + f->io_size);
- assert(f->min_zone < f->max_zone);
}
}
{
struct thread_data *td;
struct fio_file *f;
- uint32_t zone_size;
int i, j, k;
for_each_td(td, i) {
for_each_file(td, f, j) {
+ uint64_t zone_size;
+
if (!f->zbd_info)
continue;
zone_size = f->zbd_info->zone_size;
for (k = 0; k < FIO_ARRAY_SIZE(td->o.bs); k++) {
if (td->o.verify != VERIFY_NONE &&
zone_size % td->o.bs[k] != 0) {
- log_info("%s: block size %llu is not a divisor of the zone size %d\n",
+ log_info("%s: block size %llu is not a divisor of the zone size %llu\n",
f->file_name, td->o.bs[k],
- zone_size);
+ (unsigned long long)zone_size);
return false;
}
}
static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
struct fio_zone_info *z);
-int zbd_setup_files(struct thread_data *td)
+int zbd_init_files(struct thread_data *td)
{
struct fio_file *f;
int i;
if (zbd_init_zone_info(td, f))
return 1;
}
+ return 0;
+}
+
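+/*
+ * Convert option values that were specified in units of zones (the *_nz
+ * options) into bytes, now that the zone size of each file is known.
+ */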
+void zbd_recalc_options_with_zone_granularity(struct thread_data *td)
+{
+ struct fio_file *f;
+ int i;
+
+ for_each_file(td, f, i) {
+ struct zoned_block_device_info *zbd = f->zbd_info;
+ /* zonemode=strided doesn't get per-file zone size. */
+ uint64_t zone_size = zbd ? zbd->zone_size : td->o.zone_size;
+
+ if (zone_size == 0)
+ continue;
+
+ if (td->o.size_nz > 0)
+ td->o.size = td->o.size_nz * zone_size;
+ if (td->o.io_size_nz > 0)
+ td->o.io_size = td->o.io_size_nz * zone_size;
+ if (td->o.start_offset_nz > 0)
+ td->o.start_offset = td->o.start_offset_nz * zone_size;
+ if (td->o.offset_increment_nz > 0)
+ td->o.offset_increment = td->o.offset_increment_nz * zone_size;
+ if (td->o.zone_skip_nz > 0)
+ td->o.zone_skip = td->o.zone_skip_nz * zone_size;
+ }
+}
+
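+/*
+ * Runs after zbd_init_files() and the option recalculation above, so the
+ * per-file zone info and the final I/O offsets and sizes are available.
+ */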
+int zbd_setup_files(struct thread_data *td)
+{
+ struct fio_file *f;
+ int i;
if (!zbd_using_direct_io()) {
log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
if (!zbd)
continue;
+ f->min_zone = zbd_zone_idx(f, f->file_offset);
+ f->max_zone = zbd_zone_idx(f, f->file_offset + f->io_size);
+
+ /*
+ * When all zones in the I/O range are conventional, io_size
+ * can be smaller than zone size, making min_zone the same
+ * as max_zone. This is why the assert below needs to be made
+ * conditional.
+ */
+ if (zbd_is_seq_job(f))
+ assert(f->min_zone < f->max_zone);
+
zbd->max_open_zones = zbd->max_open_zones ?: ZBD_MAX_OPEN_ZONES;
if (td->o.max_open_zones > 0 &&
{
uint64_t offset = z->start;
uint64_t length = (z+1)->start - offset;
+ uint64_t data_in_zone = z->wp - z->start;
int ret = 0;
- if (z->wp == z->start)
+ if (!data_in_zone)
return 0;
assert(is_valid_offset(f, offset + length - 1));
}
pthread_mutex_lock(&f->zbd_info->mutex);
- f->zbd_info->sectors_with_data -= z->wp - z->start;
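+ /*
+ * Discount the data in the reset zone from both counters: the overall
+ * sectors_with_data and the variant restricted to write pointer zones.
+ */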
+ f->zbd_info->sectors_with_data -= data_in_zone;
+ f->zbd_info->wp_sectors_with_data -= data_in_zone;
pthread_mutex_unlock(&f->zbd_info->mutex);
z->wp = z->start;
z->verify_block = 0;
if (f->zbd_info->open_zones[open_zone_idx] == zone_idx)
break;
}
- if (open_zone_idx == f->zbd_info->num_open_zones) {
- dprint(FD_ZBD, "%s: zone %d is not open\n",
- f->file_name, zone_idx);
+ if (open_zone_idx == f->zbd_info->num_open_zones)
return;
- }
dprint(FD_ZBD, "%s: closing zone %d\n", f->file_name, zone_idx);
memmove(f->zbd_info->open_zones + open_zone_idx,
};
/* Calculate the number of sectors with data (swd) and perform action 'a' */
-static uint64_t zbd_process_swd(const struct fio_file *f, enum swd_action a)
+static uint64_t zbd_process_swd(struct thread_data *td,
+ const struct fio_file *f, enum swd_action a)
{
struct fio_zone_info *zb, *ze, *z;
uint64_t swd = 0;
+ uint64_t wp_swd = 0; /* swd limited to zones with a write pointer */
zb = get_zone(f, f->min_zone);
ze = get_zone(f, f->max_zone);
for (z = zb; z < ze; z++) {
- pthread_mutex_lock(&z->mutex);
+ if (z->has_wp) {
+ zone_lock(td, f, z);
+ wp_swd += z->wp - z->start;
+ }
swd += z->wp - z->start;
}
pthread_mutex_lock(&f->zbd_info->mutex);
switch (a) {
case CHECK_SWD:
assert(f->zbd_info->sectors_with_data == swd);
+ assert(f->zbd_info->wp_sectors_with_data == wp_swd);
break;
case SET_SWD:
f->zbd_info->sectors_with_data = swd;
+ f->zbd_info->wp_sectors_with_data = wp_swd;
break;
}
pthread_mutex_unlock(&f->zbd_info->mutex);
for (z = zb; z < ze; z++)
- zone_unlock(z);
+ if (z->has_wp)
+ zone_unlock(z);
return swd;
}
*/
static const bool enable_check_swd = false;
-/* Check whether the value of zbd_info.sectors_with_data is correct. */
-static void zbd_check_swd(const struct fio_file *f)
+/* Check whether the values of zbd_info.*sectors_with_data are correct. */
+static void zbd_check_swd(struct thread_data *td, const struct fio_file *f)
{
if (!enable_check_swd)
return;
- zbd_process_swd(f, CHECK_SWD);
-}
-
-static void zbd_init_swd(struct fio_file *f)
-{
- uint64_t swd;
-
- if (!enable_check_swd)
- return;
-
- swd = zbd_process_swd(f, SET_SWD);
- dprint(FD_ZBD, "%s(%s): swd = %" PRIu64 "\n", __func__, f->file_name,
- swd);
+ zbd_process_swd(td, f, CHECK_SWD);
}
void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
struct fio_zone_info *zb, *ze;
+ uint64_t swd;
if (!f->zbd_info || !td_write(td))
return;
zb = get_zone(f, f->min_zone);
ze = get_zone(f, f->max_zone);
- zbd_init_swd(f);
+ swd = zbd_process_swd(td, f, SET_SWD);
+ dprint(FD_ZBD, "%s(%s): swd = %" PRIu64 "\n", __func__, f->file_name,
+ swd);
/*
* If data verification is enabled reset the affected zones before
* writing any data to avoid that a zone reset has to be issued while
/*
* Modify the offset of an I/O unit that does not refer to an open zone such
* that it refers to an open zone. Close an open zone and open a new zone if
- * necessary. This algorithm can only work correctly if all write pointers are
+ * necessary. The search for an open zone considers sequential zones only.
+ * This algorithm can only work correctly if all write pointers are
* a multiple of the fio block size. The caller must neither hold z->mutex
* nor f->zbd_info->mutex. Returns with z->mutex held upon success.
*/
uint32_t tmp_idx;
z = get_zone(f, zone_idx);
-
- zone_lock(td, f, z);
+ if (z->has_wp)
+ zone_lock(td, f, z);
pthread_mutex_lock(&f->zbd_info->mutex);
- if (z->cond != ZBD_ZONE_COND_OFFLINE &&
- td->o.max_open_zones == 0 && td->o.job_max_open_zones == 0)
- goto examine_zone;
- if (f->zbd_info->num_open_zones == 0) {
- dprint(FD_ZBD, "%s(%s): no zones are open\n",
- __func__, f->file_name);
- goto open_other_zone;
+ if (z->has_wp) {
+ if (z->cond != ZBD_ZONE_COND_OFFLINE &&
+ td->o.max_open_zones == 0 && td->o.job_max_open_zones == 0)
+ goto examine_zone;
+ if (f->zbd_info->num_open_zones == 0) {
+ dprint(FD_ZBD, "%s(%s): no zones are open\n",
+ __func__, f->file_name);
+ goto open_other_zone;
+ }
}
/*
* Ignore zones which don't belong to thread's offset/size area.
*/
open_zone_idx = pick_random_zone_idx(f, io_u);
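+ /*
+ * When no zones are open, e.g. because the locked zone is
+ * conventional, pick_random_zone_idx() returns 0, so the
+ * assertion below must tolerate index 0.
+ */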
- assert(open_zone_idx < f->zbd_info->num_open_zones);
+ assert(!open_zone_idx ||
+ open_zone_idx < f->zbd_info->num_open_zones);
tmp_idx = open_zone_idx;
for (i = 0; i < f->zbd_info->num_open_zones; i++) {
uint32_t tmpz;
dprint(FD_ZBD, "%s(%s): no candidate zone\n",
__func__, f->file_name);
pthread_mutex_unlock(&f->zbd_info->mutex);
- zone_unlock(z);
+ if (z->has_wp)
+ zone_unlock(z);
return NULL;
found_candidate_zone:
break;
zone_idx = new_zone_idx;
pthread_mutex_unlock(&f->zbd_info->mutex);
- zone_unlock(z);
+ if (z->has_wp)
+ zone_unlock(z);
}
/* Both z->mutex and f->zbd_info->mutex are held. */
/* Zone 'z' is full, so try to open a new zone. */
for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
zone_idx++;
- zone_unlock(z);
+ if (z->has_wp)
+ zone_unlock(z);
z++;
if (!is_valid_offset(f, z->start)) {
/* Wrap-around. */
z = get_zone(f, zone_idx);
}
assert(is_valid_offset(f, z->start));
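+ /* Skip conventional zones; only write pointer zones can be opened. */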
+ if (!z->has_wp)
+ continue;
zone_lock(td, f, z);
if (z->open)
continue;
dprint(FD_ZBD, "%s(%s): returning zone %d\n", __func__, f->file_name,
zone_idx);
io_u->offset = z->start;
+ assert(z->has_wp);
assert(z->cond != ZBD_ZONE_COND_OFFLINE);
return z;
}
assert(z);
}
- if (z->verify_block * min_bs >= z->capacity)
+ if (z->verify_block * min_bs >= z->capacity) {
log_err("%s: %d * %d >= %llu\n", f->file_name, z->verify_block,
min_bs, (unsigned long long)z->capacity);
- io_u->offset = z->start + z->verify_block++ * min_bs;
+ /*
+ * If the assertion below fails during a test run, adding
+ * "--experimental_verify=1" to the command line may help.
+ */
+ assert(false);
+ }
+ io_u->offset = z->start + z->verify_block * min_bs;
+ if (io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
+ log_err("%s: %llu + %llu >= %llu\n", f->file_name, io_u->offset,
+ io_u->buflen, (unsigned long long) zbd_zone_capacity_end(z));
+ assert(false);
+ }
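+ /* Advance the verify position past the blocks this io_u covers. */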
+ z->verify_block += io_u->buflen / min_bs;
+
return z;
}
/*
- * Find another zone for which @io_u fits below the write pointer. Start
- * searching in zones @zb + 1 .. @zl and continue searching in zones
- * @zf .. @zb - 1.
+ * Find another zone that holds enough readable data for @io_u. Search in
+ * zones @zb + 1 .. @zl. For a random workload, also search in zones
+ * @zb - 1 .. @zf.
*
- * Either returns NULL or returns a zone pointer and holds the mutex for that
- * zone.
+ * Either returns NULL or returns a zone pointer. If the returned zone has a
+ * write pointer, its mutex is held on return.
*/
static struct fio_zone_info *
zbd_find_zone(struct thread_data *td, struct io_u *io_u,
*/
for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
if (z1 < zl && z1->cond != ZBD_ZONE_COND_OFFLINE) {
- zone_lock(td, f, z1);
+ if (z1->has_wp)
+ zone_lock(td, f, z1);
if (z1->start + min_bs <= z1->wp)
return z1;
- zone_unlock(z1);
+ if (z1->has_wp)
+ zone_unlock(z1);
} else if (!td_random(td)) {
break;
}
if (td_random(td) && z2 >= zf &&
z2->cond != ZBD_ZONE_COND_OFFLINE) {
- zone_lock(td, f, z2);
+ if (z2->has_wp)
+ zone_lock(td, f, z2);
if (z2->start + min_bs <= z2->wp)
return z2;
- zone_unlock(z2);
+ if (z2->has_wp)
+ zone_unlock(z2);
}
}
dprint(FD_ZBD, "%s: adjusting random read offset failed\n",
assert(zone_idx < zbd_info->nr_zones);
z = get_zone(f, zone_idx);
- if (!z->has_wp)
- return;
+ assert(z->has_wp);
if (!success)
goto unlock;
* z->wp > zone_end means that one or more I/O errors
* have occurred.
*/
- if (z->wp <= zone_end)
+ if (z->wp <= zone_end) {
zbd_info->sectors_with_data += zone_end - z->wp;
+ zbd_info->wp_sectors_with_data += zone_end - z->wp;
+ }
pthread_mutex_unlock(&zbd_info->mutex);
z->wp = zone_end;
break;
assert(zone_idx < zbd_info->nr_zones);
z = get_zone(f, zone_idx);
- if (!z->has_wp)
- return;
+ assert(z->has_wp);
dprint(FD_ZBD,
"%s: terminate I/O (%lld, %llu) for zone %u\n",
zbd_end_zone_io(td, io_u, z);
zone_unlock(z);
- zbd_check_swd(f);
+ zbd_check_swd(td, f);
}
/*
zb = get_zone(f, zone_idx_b);
orig_zb = zb;
- /* Accept the I/O offset for conventional zones. */
- if (!zb->has_wp)
+ if (!zb->has_wp) {
+ /* Accept non-write I/Os for conventional zones. */
+ if (io_u->ddir != DDIR_WRITE)
+ return io_u_accept;
+ /*
+ * Make sure that writes to conventional zones
+ * don't cross over to any sequential zones.
+ */
+ if (!(zb + 1)->has_wp ||
+ io_u->offset + io_u->buflen <= (zb + 1)->start)
+ return io_u_accept;
+
+ if (io_u->offset + min_bs > (zb + 1)->start) {
+ dprint(FD_IO,
+ "%s: off=%llu + min_bs=%u > next zone %llu\n",
+ f->file_name, io_u->offset,
+ min_bs, (unsigned long long) (zb + 1)->start);
+ io_u->offset = zb->start + (zb + 1)->start - io_u->offset;
+ new_len = min(io_u->buflen, (zb + 1)->start - io_u->offset);
+ } else {
+ new_len = (zb + 1)->start - io_u->offset;
+ }
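+ /* Round the write size down to a multiple of the minimum block size. */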
+ io_u->buflen = new_len / min_bs * min_bs;
return io_u_accept;
+ }
/*
* Accept the I/O offset for reads if reading beyond the write pointer
io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
return io_u_accept;
- zbd_check_swd(f);
+ zbd_check_swd(td, f);
zone_lock(td, f, zb);
case DDIR_READ:
if (td->runstate == TD_VERIFYING && td_write(td)) {
zb = zbd_replay_write_order(td, io_u, zb);
- /*
- * Since we return with the zone lock still held,
- * add an annotation to let Coverity know that it
- * is intentional.
- */
- /* coverity[missing_unlock] */
goto accept;
}
/*
io_u->offset = zb->start +
((io_u->offset - orig_zb->start) %
(range - io_u->buflen)) / min_bs * min_bs;
+ /*
+ * When zbd_find_zone() returns a conventional zone,
+ * we can simply accept the new I/O offset here.
+ */
+ if (!zb->has_wp)
+ return io_u_accept;
/*
* Make sure the I/O does not cross over the zone wp position.
*/
assert(io_u->offset + io_u->buflen <= zb->wp);
goto accept;
case DDIR_WRITE:
- if (io_u->buflen > f->zbd_info->zone_size)
+ if (io_u->buflen > f->zbd_info->zone_size) {
+ td_verror(td, EINVAL, "I/O buflen exceeds zone size");
+ dprint(FD_IO,
+ "%s: I/O buflen %llu exceeds zone size %llu\n",
+ f->file_name, io_u->buflen,
+ (unsigned long long) f->zbd_info->zone_size);
goto eof;
+ }
if (!zbd_open_zone(td, f, zone_idx_b)) {
zone_unlock(zb);
zb = zbd_convert_to_open_zone(td, io_u);
- if (!zb)
+ if (!zb) {
+ dprint(FD_IO, "%s: can't convert to open zone\n",
+ f->file_name);
goto eof;
+ }
zone_idx_b = zbd_zone_nr(f, zb);
}
/* Check whether the zone reset threshold has been exceeded */
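+ /* Only data in write pointer zones counts towards the reset threshold. */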
if (td->o.zrf.u.f) {
- if (f->zbd_info->sectors_with_data >=
+ if (f->zbd_info->wp_sectors_with_data >=
f->io_size * td->o.zrt.u.f &&
zbd_dec_and_reset_write_cnt(td, f)) {
zb->reset_zone = 1;
goto eof;
if (zb->capacity < min_bs) {
+ td_verror(td, EINVAL, "ZCAP is less min_bs");
log_err("zone capacity %llu smaller than minimum block size %d\n",
(unsigned long long)zb->capacity,
min_bs);
assert(!zbd_zone_full(f, zb, min_bs));
io_u->offset = zb->wp;
if (!is_valid_offset(f, io_u->offset)) {
- dprint(FD_ZBD, "Dropped request with offset %llu\n",
- io_u->offset);
+ td_verror(td, EINVAL, "invalid WP value");
+ dprint(FD_ZBD, "%s: dropped request with offset %llu\n",
+ f->file_name, io_u->offset);
goto eof;
}
/*
orig_len, io_u->buflen);
goto accept;
}
- log_err("Zone remainder %lld smaller than minimum block size %d\n",
- (zbd_zone_capacity_end(zb) - io_u->offset),
- min_bs);
+ td_verror(td, EIO, "zone remainder too small");
+ log_err("zone remainder %lld smaller than min block size %d\n",
+ (zbd_zone_capacity_end(zb) - io_u->offset), min_bs);
goto eof;
case DDIR_TRIM:
/* fall-through */
assert(false);
accept:
- assert(zb);
+ assert(zb->has_wp);
assert(zb->cond != ZBD_ZONE_COND_OFFLINE);
assert(!io_u->zbd_queue_io);
assert(!io_u->zbd_put_io);
io_u->zbd_queue_io = zbd_queue_io;
io_u->zbd_put_io = zbd_put_io;
+ /*
+ * Since we return with the zone lock still held,
+ * add an annotation to let Coverity know that it
+ * is intentional.
+ */
+ /* coverity[missing_unlock] */
return io_u_accept;
eof:
- if (zb)
+ if (zb && zb->has_wp)
zone_unlock(zb);
return io_u_eof;
}