return (uint64_t)(offset - f->file_offset) < f->io_size;
}
+/*
+ * Return a pointer to the zone descriptor at index @zone_nr in @f's zone
+ * table. Pure accessor; callers are responsible for ensuring zone_nr is a
+ * valid index (< f->zbd_info->nr_zones) and that f->zbd_info is non-NULL.
+ */
+static inline struct fio_zone_info *get_zone(const struct fio_file *f,
+ unsigned int zone_nr)
+{
+ return &f->zbd_info->zone_info[zone_nr];
+}
+
/* Verify whether direct I/O is used for all host-managed zoned drives. */
static bool zbd_using_direct_io(void)
{
zone_idx_b = zbd_zone_idx(f, f->file_offset);
zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size - 1);
for (zone_idx = zone_idx_b; zone_idx <= zone_idx_e; zone_idx++)
- if (zbd_zone_swr(&f->zbd_info->zone_info[zone_idx]))
+ if (zbd_zone_swr(get_zone(f, zone_idx)))
return true;
return false;
}
zone_idx = zbd_zone_idx(f, f->file_offset);
- z = &f->zbd_info->zone_info[zone_idx];
+ z = get_zone(f, zone_idx);
if ((f->file_offset != z->start) &&
(td->o.td_ddir != TD_DDIR_READ)) {
new_offset = zbd_zone_end(z);
f->file_offset = new_offset;
}
zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
- z = &f->zbd_info->zone_info[zone_idx];
+ z = get_zone(f, zone_idx);
new_end = z->start;
if ((td->o.td_ddir != TD_DDIR_READ) &&
(f->file_offset + f->io_size != new_end)) {
sizeof(f->zbd_info->open_zones[0]));
f->zbd_info->num_open_zones--;
td->num_open_zones--;
- f->zbd_info->zone_info[zone_idx].open = 0;
+ get_zone(f, zone_idx)->open = 0;
}
/*
struct fio_zone_info *zb, *ze, *z;
uint64_t swd = 0;
- zb = &f->zbd_info->zone_info[f->min_zone];
- ze = &f->zbd_info->zone_info[f->max_zone];
+ zb = get_zone(f, f->min_zone);
+ ze = get_zone(f, f->max_zone);
for (z = zb; z < ze; z++) {
pthread_mutex_lock(&z->mutex);
swd += z->wp - z->start;
if (!f->zbd_info || !td_write(td))
return;
- zb = &f->zbd_info->zone_info[f->min_zone];
- ze = &f->zbd_info->zone_info[f->max_zone];
+ zb = get_zone(f, f->min_zone);
+ ze = get_zone(f, f->max_zone);
zbd_init_swd(f);
/*
* If data verification is enabled reset the affected zones before
uint32_t zone_idx)
{
const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
- struct fio_zone_info *z = &f->zbd_info->zone_info[zone_idx];
+ struct fio_zone_info *z = get_zone(f, zone_idx);
bool res = true;
if (z->cond == ZBD_ZONE_COND_OFFLINE)
for (;;) {
uint32_t tmp_idx;
- z = &f->zbd_info->zone_info[zone_idx];
+ z = get_zone(f, zone_idx);
zone_lock(td, f, z);
pthread_mutex_lock(&f->zbd_info->mutex);
if (!is_valid_offset(f, z->start)) {
/* Wrap-around. */
zone_idx = f->min_zone;
- z = &f->zbd_info->zone_info[zone_idx];
+ z = get_zone(f, zone_idx);
}
assert(is_valid_offset(f, z->start));
zone_lock(td, f, z);
pthread_mutex_unlock(&f->zbd_info->mutex);
pthread_mutex_unlock(&z->mutex);
- z = &f->zbd_info->zone_info[zone_idx];
+ z = get_zone(f, zone_idx);
zone_lock(td, f, z);
if (z->wp + min_bs <= zbd_zone_capacity_end(z))
const uint32_t min_bs = td->o.min_bs[io_u->ddir];
struct fio_file *f = io_u->file;
struct fio_zone_info *z1, *z2;
- const struct fio_zone_info *const zf =
- &f->zbd_info->zone_info[f->min_zone];
+ const struct fio_zone_info *const zf = get_zone(f, f->min_zone);
/*
* Skip to the next non-empty zone in case of sequential I/O and to
zone_idx = zbd_zone_idx(f, io_u->offset);
assert(zone_idx < zbd_info->nr_zones);
- z = &zbd_info->zone_info[zone_idx];
+ z = get_zone(f, zone_idx);
if (!zbd_zone_swr(z))
return;
zone_idx = zbd_zone_idx(f, io_u->offset);
assert(zone_idx < zbd_info->nr_zones);
- z = &zbd_info->zone_info[zone_idx];
+ z = get_zone(f, zone_idx);
if (!zbd_zone_swr(z))
return;
assert(td->o.zone_size);
zone_idx = zbd_zone_idx(f, f->last_pos[ddir]);
- z = &f->zbd_info->zone_info[zone_idx];
+ z = get_zone(f, zone_idx);
/*
* When the zone capacity is smaller than the zone size and the I/O is
assert(is_valid_offset(f, io_u->offset));
assert(io_u->buflen);
zone_idx_b = zbd_zone_idx(f, io_u->offset);
- zb = &f->zbd_info->zone_info[zone_idx_b];
+ zb = get_zone(f, zone_idx_b);
orig_zb = zb;
/* Accept the I/O offset for conventional zones. */
if (range < min_bs ||
((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
pthread_mutex_unlock(&zb->mutex);
- zl = &f->zbd_info->zone_info[f->max_zone];
+ zl = get_zone(f, f->max_zone);
zb = zbd_find_zone(td, io_u, zb, zl);
if (!zb) {
dprint(FD_ZBD,