#include "oslib/asprintf.h"
#include "smalloc.h"
#include "verify.h"
+#include "pshared.h"
#include "zbd.h"
/**
return z->type == ZBD_ZONE_TYPE_SWR;
}
+/**
+ * zbd_zone_end - Return zone end location
+ * @z: zone info pointer.
+ */
+static inline uint64_t zbd_zone_end(const struct fio_zone_info *z)
+{
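+	/* The sentinel entry after the last zone makes (z+1)->start valid for every zone. */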
+ return (z+1)->start;
+}
+
+/**
+ * zbd_zone_capacity_end - Return zone capacity limit end location
+ * @z: zone info pointer.
+ */
+static inline uint64_t zbd_zone_capacity_end(const struct fio_zone_info *z)
+{
+ return z->start + z->capacity;
+}
+
/**
* zbd_zone_full - verify whether a minimum number of bytes remain in a zone
* @f: file pointer.
assert((required & 511) == 0);
return zbd_zone_swr(z) &&
- z->wp + required > z->start + f->zbd_info->zone_size;
+ z->wp + required > zbd_zone_capacity_end(z);
}
-static void zone_lock(struct thread_data *td, struct fio_zone_info *z)
+static void zone_lock(struct thread_data *td, struct fio_file *f, struct fio_zone_info *z)
{
+ struct zoned_block_device_info *zbd = f->zbd_info;
+ uint32_t nz = z - zbd->zone_info;
+
+ /* A thread should never lock zones outside its working area. */
+ assert(f->min_zone <= nz && nz < f->max_zone);
+
/*
* Lock the io_u target zone. The zone will be unlocked if io_u offset
 * is changed or when io_u completes and zbd_put_io() is executed.
zone_idx = zbd_zone_idx(f, f->file_offset);
z = &f->zbd_info->zone_info[zone_idx];
- if (f->file_offset != z->start) {
- new_offset = (z+1)->start;
+ if ((f->file_offset != z->start) &&
+ (td->o.td_ddir != TD_DDIR_READ)) {
+ new_offset = zbd_zone_end(z);
if (new_offset >= f->file_offset + f->io_size) {
log_info("%s: io_size must be at least one zone\n",
f->file_name);
zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
z = &f->zbd_info->zone_info[zone_idx];
new_end = z->start;
- if (f->file_offset + f->io_size != new_end) {
+ if ((td->o.td_ddir != TD_DDIR_READ) &&
+ (f->file_offset + f->io_size != new_end)) {
if (new_end <= f->file_offset) {
log_info("%s: io_size must be at least one zone\n",
f->file_name);
(unsigned long long) new_end - f->file_offset);
f->io_size = new_end - f->file_offset;
}
+
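+	/* Cache the zone index range [min_zone, max_zone) that this file spans. */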
+ f->min_zone = zbd_zone_idx(f, f->file_offset);
+ f->max_zone = zbd_zone_idx(f, f->file_offset + f->io_size);
+ assert(f->min_zone < f->max_zone);
}
}
uint32_t nr_zones;
struct fio_zone_info *p;
uint64_t zone_size = td->o.zone_size;
+ uint64_t zone_capacity = td->o.zone_capacity;
struct zoned_block_device_info *zbd_info = NULL;
- pthread_mutexattr_t attr;
int i;
if (zone_size == 0) {
return 1;
}
+ if (zone_capacity == 0)
+ zone_capacity = zone_size;
+
+ if (zone_capacity > zone_size) {
+ log_err("%s: job parameter zonecapacity %llu is larger than zone size %llu\n",
+ f->file_name, (unsigned long long) td->o.zone_capacity,
+ (unsigned long long) td->o.zone_size);
+ return 1;
+ }
+
nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
zbd_info = scalloc(1, sizeof(*zbd_info) +
(nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
if (!zbd_info)
return -ENOMEM;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutexattr_setpshared(&attr, true);
- pthread_mutex_init(&zbd_info->mutex, &attr);
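+	/* mutex_init_pshared() makes the mutex process-shared (PTHREAD_PROCESS_SHARED). */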
+ mutex_init_pshared(&zbd_info->mutex);
zbd_info->refcount = 1;
p = &zbd_info->zone_info[0];
for (i = 0; i < nr_zones; i++, p++) {
- pthread_mutex_init(&p->mutex, &attr);
+ mutex_init_pshared_with_type(&p->mutex,
+ PTHREAD_MUTEX_RECURSIVE);
p->start = i * zone_size;
- p->wp = p->start + zone_size;
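+		/* Emulated zones start out empty, so the write pointer begins at the zone start. */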
+ p->wp = p->start;
p->type = ZBD_ZONE_TYPE_SWR;
p->cond = ZBD_ZONE_COND_EMPTY;
+ p->capacity = zone_capacity;
}
/* a sentinel */
p->start = nr_zones * zone_size;
f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
ilog2(zone_size) : 0;
f->zbd_info->nr_zones = nr_zones;
- pthread_mutexattr_destroy(&attr);
return 0;
}
struct fio_zone_info *p;
uint64_t zone_size, offset;
struct zoned_block_device_info *zbd_info = NULL;
- pthread_mutexattr_t attr;
int i, j, ret = 0;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutexattr_setpshared(&attr, true);
-
zones = calloc(ZBD_REPORT_MAX_ZONES, sizeof(struct zbd_zone));
if (!zones)
goto out;
ret = -ENOMEM;
if (!zbd_info)
goto out;
- pthread_mutex_init(&zbd_info->mutex, &attr);
+ mutex_init_pshared(&zbd_info->mutex);
zbd_info->refcount = 1;
p = &zbd_info->zone_info[0];
for (offset = 0, j = 0; j < nr_zones;) {
z = &zones[0];
for (i = 0; i < nrz; i++, j++, z++, p++) {
- pthread_mutex_init(&p->mutex, &attr);
+ mutex_init_pshared_with_type(&p->mutex,
+ PTHREAD_MUTEX_RECURSIVE);
p->start = z->start;
+ p->capacity = z->capacity;
switch (z->cond) {
case ZBD_ZONE_COND_NOT_WP:
case ZBD_ZONE_COND_FULL:
- p->wp = p->start + zone_size;
+ p->wp = p->start + p->capacity;
break;
default:
assert(z->start <= z->wp);
out:
sfree(zbd_info);
free(zones);
- pthread_mutexattr_destroy(&attr);
return ret;
}
return -EINVAL;
}
- if (ret == 0)
+ if (ret == 0) {
f->zbd_info->model = zbd_model;
+ f->zbd_info->max_open_zones = td->o.max_open_zones;
+ }
return ret;
}
{
uint32_t refcount;
- if (!f->zbd_info)
- return;
+ assert(f->zbd_info);
pthread_mutex_lock(&f->zbd_info->mutex);
refcount = --f->zbd_info->refcount;
return ret;
}
-int zbd_init(struct thread_data *td)
+static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
+ uint32_t zone_idx);
+static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
+ struct fio_zone_info *z);
+
+int zbd_setup_files(struct thread_data *td)
{
struct fio_file *f;
int i;
if (!zbd_verify_bs())
return 1;
+ for_each_file(td, f, i) {
+ struct zoned_block_device_info *zbd = f->zbd_info;
+ struct fio_zone_info *z;
+ int zi;
+
+ if (!zbd)
+ continue;
+
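+		/* Zero means no limit was specified; fall back to the default limit. */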
+ zbd->max_open_zones = zbd->max_open_zones ?: ZBD_MAX_OPEN_ZONES;
+
+ if (td->o.max_open_zones > 0 &&
+ zbd->max_open_zones != td->o.max_open_zones) {
+ log_err("Different 'max_open_zones' values\n");
+ return 1;
+ }
+ if (zbd->max_open_zones > ZBD_MAX_OPEN_ZONES) {
+ log_err("'max_open_zones' value is limited by %u\n", ZBD_MAX_OPEN_ZONES);
+ return 1;
+ }
+
+ for (zi = f->min_zone; zi < f->max_zone; zi++) {
+ z = &zbd->zone_info[zi];
+ if (z->cond != ZBD_ZONE_COND_IMP_OPEN &&
+ z->cond != ZBD_ZONE_COND_EXP_OPEN)
+ continue;
+ if (zbd_open_zone(td, f, zi))
+ continue;
+ /*
+			 * If the number of open zones exceeds the specified limits,
+ * reset all extra open zones.
+ */
+ if (zbd_reset_zone(td, f, z) < 0) {
+ log_err("Failed to reest zone %d\n", zi);
+ return 1;
+ }
+ }
+ }
+
return 0;
}
+static unsigned int zbd_zone_nr(struct zoned_block_device_info *zbd_info,
+ struct fio_zone_info *zone)
+{
+ return zone - zbd_info->zone_info;
+}
+
/**
- * zbd_reset_range - reset zones for a range of sectors
+ * zbd_reset_zone - reset the write pointer of a single zone
* @td: FIO thread data.
- * @f: Fio file for which to reset zones
- * @sector: Starting sector in units of 512 bytes
- * @nr_sectors: Number of sectors in units of 512 bytes
+ * @f: FIO file associated with the disk for which to reset a write pointer.
+ * @z: Zone to reset.
*
* Returns 0 upon success and a negative error code upon failure.
+ *
+ * The caller must hold z->mutex.
*/
-static int zbd_reset_range(struct thread_data *td, struct fio_file *f,
- uint64_t offset, uint64_t length)
+static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
+ struct fio_zone_info *z)
{
- uint32_t zone_idx_b, zone_idx_e;
- struct fio_zone_info *zb, *ze, *z;
+ uint64_t offset = z->start;
+ uint64_t length = (z+1)->start - offset;
int ret = 0;
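+	/* Nothing to do if the zone is already empty. */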
+ if (z->wp == z->start)
+ return 0;
+
assert(is_valid_offset(f, offset + length - 1));
+ dprint(FD_ZBD, "%s: resetting wp of zone %u.\n", f->file_name,
+ zbd_zone_nr(f->zbd_info, z));
switch (f->zbd_info->model) {
case ZBD_HOST_AWARE:
case ZBD_HOST_MANAGED:
break;
}
- zone_idx_b = zbd_zone_idx(f, offset);
- zb = &f->zbd_info->zone_info[zone_idx_b];
- zone_idx_e = zbd_zone_idx(f, offset + length);
- ze = &f->zbd_info->zone_info[zone_idx_e];
- for (z = zb; z < ze; z++) {
- pthread_mutex_lock(&z->mutex);
- pthread_mutex_lock(&f->zbd_info->mutex);
- f->zbd_info->sectors_with_data -= z->wp - z->start;
- pthread_mutex_unlock(&f->zbd_info->mutex);
- z->wp = z->start;
- z->verify_block = 0;
- pthread_mutex_unlock(&z->mutex);
- }
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ f->zbd_info->sectors_with_data -= z->wp - z->start;
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+ z->wp = z->start;
+ z->verify_block = 0;
- td->ts.nr_zone_resets += ze - zb;
+ td->ts.nr_zone_resets++;
return ret;
}
-static unsigned int zbd_zone_nr(struct zoned_block_device_info *zbd_info,
- struct fio_zone_info *zone)
+/* The caller must hold f->zbd_info->mutex */
+static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
+ unsigned int zone_idx)
{
- return zone - zbd_info->zone_info;
-}
+ uint32_t open_zone_idx = 0;
-/**
- * zbd_reset_zone - reset the write pointer of a single zone
- * @td: FIO thread data.
- * @f: FIO file associated with the disk for which to reset a write pointer.
- * @z: Zone to reset.
- *
- * Returns 0 upon success and a negative error code upon failure.
- */
-static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
- struct fio_zone_info *z)
-{
- dprint(FD_ZBD, "%s: resetting wp of zone %u.\n", f->file_name,
- zbd_zone_nr(f->zbd_info, z));
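+	/* Find the position of zone_idx in the open_zones[] array. */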
+ for (; open_zone_idx < f->zbd_info->num_open_zones; open_zone_idx++) {
+ if (f->zbd_info->open_zones[open_zone_idx] == zone_idx)
+ break;
+ }
+ if (open_zone_idx == f->zbd_info->num_open_zones) {
+ dprint(FD_ZBD, "%s: zone %d is not open\n",
+ f->file_name, zone_idx);
+ return;
+ }
- return zbd_reset_range(td, f, z->start, (z+1)->start - z->start);
+ dprint(FD_ZBD, "%s: closing zone %d\n", f->file_name, zone_idx);
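+	/* Remove the entry from open_zones[] by shifting the array tail down one slot. */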
+ memmove(f->zbd_info->open_zones + open_zone_idx,
+ f->zbd_info->open_zones + open_zone_idx + 1,
+ (ZBD_MAX_OPEN_ZONES - (open_zone_idx + 1)) *
+ sizeof(f->zbd_info->open_zones[0]));
+ f->zbd_info->num_open_zones--;
+ td->num_open_zones--;
+ f->zbd_info->zone_info[zone_idx].open = 0;
}
/*
dprint(FD_ZBD, "%s: examining zones %u .. %u\n", f->file_name,
zbd_zone_nr(f->zbd_info, zb), zbd_zone_nr(f->zbd_info, ze));
for (z = zb; z < ze; z++) {
+ uint32_t nz = z - f->zbd_info->zone_info;
+
if (!zbd_zone_swr(z))
continue;
- zone_lock(td, z);
- reset_wp = all_zones ? z->wp != z->start :
- (td->o.td_ddir & TD_DDIR_WRITE) &&
- z->wp % min_bs != 0;
+ zone_lock(td, f, z);
+ if (all_zones) {
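+			/* Close the zone so open zone accounting stays in sync with the reset. */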
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ zbd_close_zone(td, f, nz);
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+
+ reset_wp = z->wp != z->start;
+ } else {
+ reset_wp = z->wp % min_bs != 0;
+ }
if (reset_wp) {
dprint(FD_ZBD, "%s: resetting zone %u\n",
f->file_name,
* Reset zbd_info.write_cnt, the counter that counts down towards the next
* zone reset.
*/
-static void zbd_reset_write_cnt(const struct thread_data *td,
- const struct fio_file *f)
+static void _zbd_reset_write_cnt(const struct thread_data *td,
+ const struct fio_file *f)
{
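+	/* The caller must hold f->zbd_info->mutex. */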
assert(0 <= td->o.zrf.u.f && td->o.zrf.u.f <= 1);
- pthread_mutex_lock(&f->zbd_info->mutex);
f->zbd_info->write_cnt = td->o.zrf.u.f ?
min(1.0 / td->o.zrf.u.f, 0.0 + UINT_MAX) : UINT_MAX;
+}
+
+static void zbd_reset_write_cnt(const struct thread_data *td,
+ const struct fio_file *f)
+{
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ _zbd_reset_write_cnt(td, f);
pthread_mutex_unlock(&f->zbd_info->mutex);
}
if (f->zbd_info->write_cnt)
write_cnt = --f->zbd_info->write_cnt;
if (write_cnt == 0)
- zbd_reset_write_cnt(td, f);
+ _zbd_reset_write_cnt(td, f);
pthread_mutex_unlock(&f->zbd_info->mutex);
return write_cnt == 0;
struct fio_zone_info *zb, *ze, *z;
uint64_t swd = 0;
- zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
- ze = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset +
- f->io_size)];
+ zb = &f->zbd_info->zone_info[f->min_zone];
+ ze = &f->zbd_info->zone_info[f->max_zone];
for (z = zb; z < ze; z++) {
pthread_mutex_lock(&z->mutex);
swd += z->wp - z->start;
void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
struct fio_zone_info *zb, *ze;
- uint32_t zone_idx_e;
- if (!f->zbd_info)
+ if (!f->zbd_info || !td_write(td))
return;
- zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
- zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size);
- ze = &f->zbd_info->zone_info[zone_idx_e];
+ zb = &f->zbd_info->zone_info[f->min_zone];
+ ze = &f->zbd_info->zone_info[f->max_zone];
zbd_init_swd(f);
/*
* If data verification is enabled reset the affected zones before
 * writing any data to avoid that a zone reset has to be issued while
 * writing data, which causes data loss.
*/
zbd_reset_zones(td, f, zb, ze, td->o.verify != VERIFY_NONE &&
- (td->o.td_ddir & TD_DDIR_WRITE) &&
td->runstate != TD_VERIFYING);
zbd_reset_write_cnt(td, f);
}
struct zoned_block_device_info *zbdi = f->zbd_info;
int i;
- assert(td->o.max_open_zones <= ARRAY_SIZE(zbdi->open_zones));
- assert(zbdi->num_open_zones <= td->o.max_open_zones);
+ assert(td->o.job_max_open_zones == 0 || td->num_open_zones <= td->o.job_max_open_zones);
+ assert(td->o.job_max_open_zones <= zbdi->max_open_zones);
+ assert(zbdi->num_open_zones <= zbdi->max_open_zones);
for (i = 0; i < zbdi->num_open_zones; i++)
if (zbdi->open_zones[i] == zone_idx)
* was not yet open and opening a new zone would cause the zone limit to be
* exceeded.
*/
-static bool zbd_open_zone(struct thread_data *td, const struct io_u *io_u,
+static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
uint32_t zone_idx)
{
const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
- const struct fio_file *f = io_u->file;
struct fio_zone_info *z = &f->zbd_info->zone_info[zone_idx];
bool res = true;
if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
return false;
- /* Zero means no limit */
- if (!td->o.max_open_zones)
- return true;
-
pthread_mutex_lock(&f->zbd_info->mutex);
- if (is_zone_open(td, f, zone_idx))
+ if (is_zone_open(td, f, zone_idx)) {
+ /*
+		 * If the zone is already open and the writes in flight will
+		 * fill it up, handle it as a full zone rather than an open zone.
+ */
+ if (z->wp >= zbd_zone_capacity_end(z))
+ res = false;
goto out;
+ }
res = false;
- if (f->zbd_info->num_open_zones >= td->o.max_open_zones)
+ /* Zero means no limit */
+ if (td->o.job_max_open_zones > 0 &&
+ td->num_open_zones >= td->o.job_max_open_zones)
+ goto out;
+ if (f->zbd_info->num_open_zones >= f->zbd_info->max_open_zones)
goto out;
dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
f->zbd_info->open_zones[f->zbd_info->num_open_zones++] = zone_idx;
+ td->num_open_zones++;
z->open = 1;
res = true;
return res;
}
-/* The caller must hold f->zbd_info->mutex */
-static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
- unsigned int open_zone_idx)
-{
- uint32_t zone_idx;
-
- assert(open_zone_idx < f->zbd_info->num_open_zones);
- zone_idx = f->zbd_info->open_zones[open_zone_idx];
- memmove(f->zbd_info->open_zones + open_zone_idx,
- f->zbd_info->open_zones + open_zone_idx + 1,
- (ZBD_MAX_OPEN_ZONES - (open_zone_idx + 1)) *
- sizeof(f->zbd_info->open_zones[0]));
- f->zbd_info->num_open_zones--;
- f->zbd_info->zone_info[zone_idx].open = 0;
-}
-
/* Anything goes as long as it is not a constant. */
static uint32_t pick_random_zone_idx(const struct fio_file *f,
const struct io_u *io_u)
struct io_u *io_u)
{
const uint32_t min_bs = td->o.min_bs[io_u->ddir];
- const struct fio_file *f = io_u->file;
+ struct fio_file *f = io_u->file;
struct fio_zone_info *z;
unsigned int open_zone_idx = -1;
uint32_t zone_idx, new_zone_idx;
int i;
+ bool wait_zone_close;
assert(is_valid_offset(f, io_u->offset));
- if (td->o.max_open_zones) {
+ if (td->o.max_open_zones || td->o.job_max_open_zones) {
/*
* This statement accesses f->zbd_info->open_zones[] on purpose
* without locking.
} else {
zone_idx = zbd_zone_idx(f, io_u->offset);
}
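+	/* Clamp the starting zone index to the file's [min_zone, max_zone) range. */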
+ if (zone_idx < f->min_zone)
+ zone_idx = f->min_zone;
+ else if (zone_idx >= f->max_zone)
+ zone_idx = f->max_zone - 1;
dprint(FD_ZBD, "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
__func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);
z = &f->zbd_info->zone_info[zone_idx];
- zone_lock(td, z);
+ zone_lock(td, f, z);
pthread_mutex_lock(&f->zbd_info->mutex);
- if (td->o.max_open_zones == 0)
+ if (td->o.max_open_zones == 0 && td->o.job_max_open_zones == 0)
goto examine_zone;
if (f->zbd_info->num_open_zones == 0) {
- pthread_mutex_unlock(&f->zbd_info->mutex);
- pthread_mutex_unlock(&z->mutex);
dprint(FD_ZBD, "%s(%s): no zones are open\n",
__func__, f->file_name);
- return NULL;
+ goto open_other_zone;
}
/*
if (tmp_idx >= f->zbd_info->num_open_zones)
tmp_idx = 0;
tmpz = f->zbd_info->open_zones[tmp_idx];
-
- if (is_valid_offset(f, f->zbd_info->zone_info[tmpz].start)) {
+ if (f->min_zone <= tmpz && tmpz < f->max_zone) {
open_zone_idx = tmp_idx;
goto found_candidate_zone;
}
/* Both z->mutex and f->zbd_info->mutex are held. */
examine_zone:
- if (z->wp + min_bs <= (z+1)->start) {
+ if (z->wp + min_bs <= zbd_zone_capacity_end(z)) {
pthread_mutex_unlock(&f->zbd_info->mutex);
goto out;
}
- dprint(FD_ZBD, "%s(%s): closing zone %d\n", __func__, f->file_name,
- zone_idx);
- if (td->o.max_open_zones)
- zbd_close_zone(td, f, open_zone_idx);
+
+open_other_zone:
+	/* Check whether the number of open zones has reached one of the limits. */
+ wait_zone_close =
+ f->zbd_info->num_open_zones == f->max_zone - f->min_zone ||
+ (td->o.max_open_zones &&
+ f->zbd_info->num_open_zones == td->o.max_open_zones) ||
+ (td->o.job_max_open_zones &&
+ td->num_open_zones == td->o.job_max_open_zones);
+
pthread_mutex_unlock(&f->zbd_info->mutex);
/* Only z->mutex is held. */
+ /*
+	 * When the number of open zones has reached one of the limits, wait
+	 * for a zone to close before opening a new zone.
+ */
+ if (wait_zone_close) {
+ dprint(FD_ZBD, "%s(%s): quiesce to allow open zones to close\n",
+ __func__, f->file_name);
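+		/* io_u_quiesce() waits for all in-flight I/Os; their completion can close zones. */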
+ io_u_quiesce(td);
+ }
+
/* Zone 'z' is full, so try to open a new zone. */
for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
zone_idx++;
z++;
if (!is_valid_offset(f, z->start)) {
/* Wrap-around. */
- zone_idx = zbd_zone_idx(f, f->file_offset);
+ zone_idx = f->min_zone;
z = &f->zbd_info->zone_info[zone_idx];
}
assert(is_valid_offset(f, z->start));
- zone_lock(td, z);
+ zone_lock(td, f, z);
if (z->open)
continue;
- if (zbd_open_zone(td, io_u, zone_idx))
+ if (zbd_open_zone(td, f, zone_idx))
goto out;
}
pthread_mutex_lock(&f->zbd_info->mutex);
for (i = 0; i < f->zbd_info->num_open_zones; i++) {
zone_idx = f->zbd_info->open_zones[i];
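+		/* Skip open zones outside this job's working range for the file. */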
+ if (zone_idx < f->min_zone || zone_idx >= f->max_zone)
+ continue;
pthread_mutex_unlock(&f->zbd_info->mutex);
pthread_mutex_unlock(&z->mutex);
z = &f->zbd_info->zone_info[zone_idx];
- zone_lock(td, z);
- if (z->wp + min_bs <= (z+1)->start)
+ zone_lock(td, f, z);
+ if (z->wp + min_bs <= zbd_zone_capacity_end(z))
goto out;
pthread_mutex_lock(&f->zbd_info->mutex);
}
const struct fio_file *f = io_u->file;
const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
- if (!zbd_open_zone(td, io_u, z - f->zbd_info->zone_info)) {
+ if (!zbd_open_zone(td, f, z - f->zbd_info->zone_info)) {
pthread_mutex_unlock(&z->mutex);
z = zbd_convert_to_open_zone(td, io_u);
assert(z);
}
- if (z->verify_block * min_bs >= f->zbd_info->zone_size)
+ if (z->verify_block * min_bs >= z->capacity)
log_err("%s: %d * %d >= %llu\n", f->file_name, z->verify_block,
- min_bs, (unsigned long long) f->zbd_info->zone_size);
+ min_bs, (unsigned long long)z->capacity);
io_u->offset = z->start + z->verify_block++ * min_bs;
return z;
}
struct fio_zone_info *zb, struct fio_zone_info *zl)
{
const uint32_t min_bs = td->o.min_bs[io_u->ddir];
- const struct fio_file *f = io_u->file;
+ struct fio_file *f = io_u->file;
struct fio_zone_info *z1, *z2;
const struct fio_zone_info *const zf =
- &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
+ &f->zbd_info->zone_info[f->min_zone];
/*
* Skip to the next non-empty zone in case of sequential I/O and to
*/
for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
if (z1 < zl && z1->cond != ZBD_ZONE_COND_OFFLINE) {
- pthread_mutex_lock(&z1->mutex);
+ zone_lock(td, f, z1);
if (z1->start + min_bs <= z1->wp)
return z1;
pthread_mutex_unlock(&z1->mutex);
}
if (td_random(td) && z2 >= zf &&
z2->cond != ZBD_ZONE_COND_OFFLINE) {
- pthread_mutex_lock(&z2->mutex);
+ zone_lock(td, f, z2);
if (z2->start + min_bs <= z2->wp)
return z2;
pthread_mutex_unlock(&z2->mutex);
return NULL;
}
+/**
+ * zbd_end_zone_io - update zone status at command completion
+ * @io_u: I/O unit
+ * @z: zone info pointer
+ *
+ * If the write command made the zone full, close it.
+ *
+ * The caller must hold z->mutex.
+ */
+static void zbd_end_zone_io(struct thread_data *td, const struct io_u *io_u,
+ struct fio_zone_info *z)
+{
+ const struct fio_file *f = io_u->file;
+
+ if (io_u->ddir == DDIR_WRITE &&
+ io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ zbd_close_zone(td, f, z - f->zbd_info->zone_info);
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+ }
+}
+
/**
* zbd_queue_io - update the write pointer of a sequential zone
* @io_u: I/O unit
* For write and trim operations, update the write pointer of the I/O unit
* target zone.
*/
-static void zbd_queue_io(struct io_u *io_u, int q, bool success)
+static void zbd_queue_io(struct thread_data *td, struct io_u *io_u, int q,
+ bool success)
{
const struct fio_file *f = io_u->file;
struct zoned_block_device_info *zbd_info = f->zbd_info;
switch (io_u->ddir) {
case DDIR_WRITE:
zone_end = min((uint64_t)(io_u->offset + io_u->buflen),
- (z + 1)->start);
+ zbd_zone_capacity_end(z));
pthread_mutex_lock(&zbd_info->mutex);
/*
* z->wp > zone_end means that one or more I/O errors
break;
}
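+	/* I/Os completed synchronously never reach zbd_put_io(), so update the zone state here. */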
+ if (q == FIO_Q_COMPLETED && !io_u->error)
+ zbd_end_zone_io(td, io_u, z);
+
unlock:
if (!success || q != FIO_Q_QUEUED) {
/* BUSY or COMPLETED: unlock the zone */
* zbd_put_io - Unlock an I/O unit target zone lock
* @io_u: I/O unit
*/
-static void zbd_put_io(const struct io_u *io_u)
+static void zbd_put_io(struct thread_data *td, const struct io_u *io_u)
{
const struct fio_file *f = io_u->file;
struct zoned_block_device_info *zbd_info = f->zbd_info;
struct fio_zone_info *z;
uint32_t zone_idx;
+ int ret;
if (!zbd_info)
return;
"%s: terminate I/O (%lld, %llu) for zone %u\n",
f->file_name, io_u->offset, io_u->buflen, zone_idx);
- assert(pthread_mutex_unlock(&z->mutex) == 0);
+ zbd_end_zone_io(td, io_u, z);
+
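+	/* Unlock outside of assert() so the unlock survives builds with NDEBUG. */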
+ ret = pthread_mutex_unlock(&z->mutex);
+ assert(ret == 0);
zbd_check_swd(f);
}
assert(td->o.zone_mode == ZONE_MODE_ZBD);
assert(td->o.zone_size);
+ zone_idx = zbd_zone_idx(f, f->last_pos[ddir]);
+ z = &f->zbd_info->zone_info[zone_idx];
+
+ /*
+	 * When the zone capacity is smaller than the zone size and the I/O is
+	 * a sequential write, skip to the zone end if the latest position is
+	 * at the zone capacity limit.
+ */
+ if (z->capacity < f->zbd_info->zone_size && !td_random(td) &&
+ ddir == DDIR_WRITE &&
+ f->last_pos[ddir] >= zbd_zone_capacity_end(z)) {
+ dprint(FD_ZBD,
+ "%s: Jump from zone capacity limit to zone end:"
+ " (%llu -> %llu) for zone %u (%llu)\n",
+ f->file_name, (unsigned long long) f->last_pos[ddir],
+ (unsigned long long) zbd_zone_end(z),
+ zbd_zone_nr(f->zbd_info, z),
+ (unsigned long long) z->capacity);
+ td->io_skip_bytes += zbd_zone_end(z) - f->last_pos[ddir];
+ f->last_pos[ddir] = zbd_zone_end(z);
+ }
+
/*
* zone_skip is valid only for sequential workloads.
*/
* - For reads with td->o.read_beyond_wp == false, the last position
* reached the zone write pointer.
*/
- zone_idx = zbd_zone_idx(f, f->last_pos[ddir]);
- z = &f->zbd_info->zone_info[zone_idx];
-
if (td->zone_bytes >= td->o.zone_size ||
- f->last_pos[ddir] >= (z+1)->start ||
+ f->last_pos[ddir] >= zbd_zone_end(z) ||
(ddir == DDIR_READ &&
(!td->o.read_beyond_wp) && f->last_pos[ddir] >= z->wp)) {
/*
}
}
+/**
+ * zbd_adjust_ddir - Adjust an I/O direction for zonemode=zbd.
+ *
+ * @td: FIO thread data.
+ * @io_u: FIO I/O unit.
+ * @ddir: I/O direction before adjustment.
+ *
+ * Return adjusted I/O direction.
+ */
+enum fio_ddir zbd_adjust_ddir(struct thread_data *td, struct io_u *io_u,
+ enum fio_ddir ddir)
+{
+ /*
+	 * If the read direction is chosen for the first random I/O, fio with
+	 * zonemode=zbd stops because no data can be read from a zoned block
+	 * device whose zones are all empty. Override the first I/O direction
+	 * with a write to make sure that data to read exists.
+ */
+ if (ddir != DDIR_READ || !td_rw(td))
+ return ddir;
+
+ if (io_u->file->zbd_info->sectors_with_data ||
+ td->o.read_beyond_wp)
+ return DDIR_READ;
+
+ return DDIR_WRITE;
+}
+
/**
* zbd_adjust_block - adjust the offset and length as necessary for ZBD drives
* @td: FIO thread data.
zbd_check_swd(f);
- zone_lock(td, zb);
+ zone_lock(td, f, zb);
switch (io_u->ddir) {
case DDIR_READ:
- if (td->runstate == TD_VERIFYING) {
+ if (td->runstate == TD_VERIFYING && td_write(td)) {
zb = zbd_replay_write_order(td, io_u, zb);
+ pthread_mutex_unlock(&zb->mutex);
goto accept;
}
/*
if (range < min_bs ||
((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
pthread_mutex_unlock(&zb->mutex);
- zl = &f->zbd_info->zone_info[zbd_zone_idx(f,
- f->file_offset + f->io_size)];
+ zl = &f->zbd_info->zone_info[f->max_zone];
zb = zbd_find_zone(td, io_u, zb, zl);
if (!zb) {
dprint(FD_ZBD,
case DDIR_WRITE:
if (io_u->buflen > f->zbd_info->zone_size)
goto eof;
- if (!zbd_open_zone(td, io_u, zone_idx_b)) {
+ if (!zbd_open_zone(td, f, zone_idx_b)) {
pthread_mutex_unlock(&zb->mutex);
zb = zbd_convert_to_open_zone(td, io_u);
if (!zb)
zb->reset_zone = 0;
if (zbd_reset_zone(td, f, zb) < 0)
goto eof;
+
+ if (zb->capacity < min_bs) {
+ log_err("zone capacity %llu smaller than minimum block size %d\n",
+ (unsigned long long)zb->capacity,
+ min_bs);
+ goto eof;
+ }
}
/* Make writes occur at the write pointer */
assert(!zbd_zone_full(f, zb, min_bs));
* small.
*/
new_len = min((unsigned long long)io_u->buflen,
- (zb + 1)->start - io_u->offset);
+ zbd_zone_capacity_end(zb) - io_u->offset);
new_len = new_len / min_bs * min_bs;
if (new_len == io_u->buflen)
goto accept;
goto accept;
}
log_err("Zone remainder %lld smaller than minimum block size %d\n",
- ((zb + 1)->start - io_u->offset),
+ (zbd_zone_capacity_end(zb) - io_u->offset),
min_bs);
goto eof;
case DDIR_TRIM: