{
int ret;
+ if (f->filetype == FIO_TYPE_PIPE) {
+ log_err("zonemode=zbd does not support pipes\n");
+ return -EINVAL;
+ }
+
+ /* If regular file, always emulate zones inside the file. */
+ if (f->filetype == FIO_TYPE_FILE) {
+ *model = ZBD_NONE;
+ return 0;
+ }
+
if (td->io_ops && td->io_ops->get_zoned_model)
ret = td->io_ops->get_zoned_model(td, f, model);
else
ret = blkzoned_get_zoned_model(td, f, model);
return ret;
}
+/**
+ * zbd_get_max_open_zones - Get the maximum number of open zones
+ * @td: FIO thread data
+ * @f: FIO file for which to get max open zones
+ * @max_open_zones: Upon success, result will be stored here.
+ *
+ * A @max_open_zones value set to zero means no limit.
+ *
+ * Returns 0 upon success and a negative error code upon failure.
+ */
+int zbd_get_max_open_zones(struct thread_data *td, struct fio_file *f,
+ unsigned int *max_open_zones)
+{
+ int ret;
+
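+ /* Prefer the ioengine callback; otherwise fall back to the blkzoned helper */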
+ if (td->io_ops && td->io_ops->get_max_open_zones)
+ ret = td->io_ops->get_max_open_zones(td, f, max_open_zones);
+ else
+ ret = blkzoned_get_max_open_zones(td, f, max_open_zones);
+ if (ret < 0) {
+ td_verror(td, errno, "get max open zones failed");
+ log_err("%s: get max open zones failed (%d).\n",
+ f->file_name, errno);
+ }
+
+ return ret;
+}
+
/**
* zbd_zone_idx - convert an offset into a zone number
* @f: file pointer.
return false;
}
- if (td->o.zone_skip &&
- (td->o.zone_skip < td->o.zone_size ||
- td->o.zone_skip % td->o.zone_size)) {
+ if (td->o.zone_skip % td->o.zone_size) {
log_err("%s: zoneskip %llu is not a multiple of the device zone size %llu.\n",
f->file_name, (unsigned long long) td->o.zone_skip,
(unsigned long long) td->o.zone_size);
{
struct thread_data *td;
struct fio_file *f;
- uint32_t zone_size;
int i, j, k;
for_each_td(td, i) {
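+ /*
+ * With zonemode=zbd a trim must cover a whole zone (see the per-file
+ * block size check further below), so a variable trim block size from
+ * bsrange or bssplit cannot be honored.
+ */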
+ if (td_trim(td) &&
+ (td->o.min_bs[DDIR_TRIM] != td->o.max_bs[DDIR_TRIM] ||
+ td->o.bssplit_nr[DDIR_TRIM])) {
+ log_info("bsrange and bssplit are not allowed for trim with zonemode=zbd\n");
+ return false;
+ }
for_each_file(td, f, j) {
+ uint64_t zone_size;
+
if (!f->zbd_info)
continue;
zone_size = f->zbd_info->zone_size;
+ if (td_trim(td) && td->o.bs[DDIR_TRIM] != zone_size) {
+ log_info("%s: trim block size %llu is not the zone size %llu\n",
+ f->file_name, td->o.bs[DDIR_TRIM],
+ (unsigned long long)zone_size);
+ return false;
+ }
for (k = 0; k < FIO_ARRAY_SIZE(td->o.bs); k++) {
if (td->o.verify != VERIFY_NONE &&
zone_size % td->o.bs[k] != 0) {
- log_info("%s: block size %llu is not a divisor of the zone size %d\n",
+ log_info("%s: block size %llu is not a divisor of the zone size %llu\n",
f->file_name, td->o.bs[k],
- zone_size);
+ (unsigned long long)zone_size);
return false;
}
}
int i;
if (zone_size == 0) {
- log_err("%s: Specifying the zone size is mandatory for regular block devices with --zonemode=zbd\n\n",
+ log_err("%s: Specifying the zone size is mandatory for regular file/block device with --zonemode=zbd\n\n",
f->file_name);
return 1;
}
return 1;
}
+ if (f->real_file_size < zone_size) {
+ log_err("%s: file/device size %"PRIu64" is smaller than zone size %"PRIu64"\n",
+ f->file_name, f->real_file_size, zone_size);
+ return -EINVAL;
+ }
+
nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
zbd_info = scalloc(1, sizeof(*zbd_info) +
(nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
return ret;
}
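+/*
+ * Determine the effective max open zones limit for file @f from the device
+ * limit and the --max_open_zones option, and store it in f->zbd_info.
+ */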
+static int zbd_set_max_open_zones(struct thread_data *td, struct fio_file *f)
+{
+ struct zoned_block_device_info *zbd = f->zbd_info;
+ unsigned int max_open_zones;
+ int ret;
+
+ if (zbd->model != ZBD_HOST_MANAGED || td->o.ignore_zone_limits) {
+ /* Only host-managed devices have a max open limit */
+ zbd->max_open_zones = td->o.max_open_zones;
+ goto out;
+ }
+
+ /* If host-managed, get the max open limit */
+ ret = zbd_get_max_open_zones(td, f, &max_open_zones);
+ if (ret)
+ return ret;
+
+ if (!max_open_zones) {
+ /* No device limit */
+ zbd->max_open_zones = td->o.max_open_zones;
+ } else if (!td->o.max_open_zones) {
+ /* No user limit. Set limit to device limit */
+ zbd->max_open_zones = max_open_zones;
+ } else if (td->o.max_open_zones <= max_open_zones) {
+ /* Both user limit and dev limit. User limit not too large */
+ zbd->max_open_zones = td->o.max_open_zones;
+ } else {
+ /* Both user limit and dev limit. User limit too large */
+ td_verror(td, EINVAL,
+ "Specified --max_open_zones is too large");
+ log_err("Specified --max_open_zones (%d) is larger than max (%u)\n",
+ td->o.max_open_zones, max_open_zones);
+ return -EINVAL;
+ }
+
+out:
+ /* Ensure that the limit is not larger than FIO's internal limit */
+ if (zbd->max_open_zones > ZBD_MAX_OPEN_ZONES) {
+ td_verror(td, EINVAL, "'max_open_zones' value is too large");
+ log_err("'max_open_zones' value is larger than %u\n", ZBD_MAX_OPEN_ZONES);
+ return -EINVAL;
+ }
+
+ dprint(FD_ZBD, "%s: using max open zones limit: %"PRIu32"\n",
+ f->file_name, zbd->max_open_zones);
+
+ return 0;
+}
+
/*
* Allocate zone information and store it into f->zbd_info if zonemode=zbd.
*
return ret;
switch (zbd_model) {
- case ZBD_IGNORE:
- return 0;
case ZBD_HOST_AWARE:
case ZBD_HOST_MANAGED:
ret = parse_zone_info(td, f);
+ if (ret)
+ return ret;
break;
case ZBD_NONE:
ret = init_zone_info(td, f);
+ if (ret)
+ return ret;
break;
default:
td_verror(td, EINVAL, "Unsupported zoned model");
return -EINVAL;
}
- if (ret == 0) {
- f->zbd_info->model = zbd_model;
- f->zbd_info->max_open_zones = td->o.max_open_zones;
+ assert(f->zbd_info);
+ f->zbd_info->model = zbd_model;
+
+ ret = zbd_set_max_open_zones(td, f);
+ if (ret) {
+ zbd_free_zone_info(f);
+ return ret;
}
- return ret;
+
+ return 0;
}
void zbd_free_zone_info(struct fio_file *f)
static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
struct fio_zone_info *z);
-int zbd_setup_files(struct thread_data *td)
+int zbd_init_files(struct thread_data *td)
{
struct fio_file *f;
int i;
if (zbd_init_zone_info(td, f))
return 1;
}
+ return 0;
+}
+
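+/*
+ * Convert the job options that were specified in units of zones (the *_nz
+ * option variants) into byte values now that the zone size is known.
+ */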
+void zbd_recalc_options_with_zone_granularity(struct thread_data *td)
+{
+ struct fio_file *f;
+ int i;
+
+ for_each_file(td, f, i) {
+ struct zoned_block_device_info *zbd = f->zbd_info;
+ /* zonemode=strided has no per-file zone size; fall back to the zone_size option. */
+ uint64_t zone_size = zbd ? zbd->zone_size : td->o.zone_size;
+
+ if (zone_size == 0)
+ continue;
+
+ if (td->o.size_nz > 0) {
+ td->o.size = td->o.size_nz * zone_size;
+ }
+ if (td->o.io_size_nz > 0) {
+ td->o.io_size = td->o.io_size_nz * zone_size;
+ }
+ if (td->o.start_offset_nz > 0) {
+ td->o.start_offset = td->o.start_offset_nz * zone_size;
+ }
+ if (td->o.offset_increment_nz > 0) {
+ td->o.offset_increment = td->o.offset_increment_nz * zone_size;
+ }
+ if (td->o.zone_skip_nz > 0) {
+ td->o.zone_skip = td->o.zone_skip_nz * zone_size;
+ }
+ }
+}
+
+int zbd_setup_files(struct thread_data *td)
+{
+ struct fio_file *f;
+ int i;
if (!zbd_using_direct_io()) {
log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
struct fio_zone_info *z;
int zi;
- if (!zbd)
- continue;
+ assert(zbd);
f->min_zone = zbd_zone_idx(f, f->file_offset);
f->max_zone = zbd_zone_idx(f, f->file_offset + f->io_size);
if (zbd_is_seq_job(f))
assert(f->min_zone < f->max_zone);
- zbd->max_open_zones = zbd->max_open_zones ?: ZBD_MAX_OPEN_ZONES;
-
if (td->o.max_open_zones > 0 &&
zbd->max_open_zones != td->o.max_open_zones) {
log_err("Different 'max_open_zones' values\n");
return 1;
}
- if (zbd->max_open_zones > ZBD_MAX_OPEN_ZONES) {
- log_err("'max_open_zones' value is limited by %u\n", ZBD_MAX_OPEN_ZONES);
+
+ /*
+ * The per-job max open zones limit cannot be used without a
+ * global max open zones limit, since the tracking of open zones
+ * is disabled when there is no global limit.
+ */
+ if (td->o.job_max_open_zones && !zbd->max_open_zones) {
+ log_err("'job_max_open_zones' cannot be used without a global open zones limit\n");
return 1;
}
+ /*
+ * zbd->max_open_zones is the global limit shared by all jobs
+ * that target the same zoned block device. Force the per-thread
+ * copy of the global limit into sync with the actual global limit.
+ * (The real per-thread/job limit is stored in td->o.job_max_open_zones.)
+ */
+ td->o.max_open_zones = zbd->max_open_zones;
+
for (zi = f->min_zone; zi < f->max_zone; zi++) {
z = &zbd->zone_info[zi];
if (z->cond != ZBD_ZONE_COND_IMP_OPEN &&
* @f: fio file for which to reset zones
* @zb: first zone to reset.
* @ze: first zone not to reset.
- * @all_zones: whether to reset all zones or only those zones for which the
- * write pointer is not a multiple of td->o.min_bs[DDIR_WRITE].
*/
static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
struct fio_zone_info *const zb,
- struct fio_zone_info *const ze, bool all_zones)
+ struct fio_zone_info *const ze)
{
struct fio_zone_info *z;
const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
- bool reset_wp;
int res = 0;
assert(min_bs);
if (!z->has_wp)
continue;
zone_lock(td, f, z);
- if (all_zones) {
- pthread_mutex_lock(&f->zbd_info->mutex);
- zbd_close_zone(td, f, nz);
- pthread_mutex_unlock(&f->zbd_info->mutex);
-
- reset_wp = z->wp != z->start;
- } else {
- reset_wp = z->wp % min_bs != 0;
- }
- if (reset_wp) {
+ pthread_mutex_lock(&f->zbd_info->mutex);
+ zbd_close_zone(td, f, nz);
+ pthread_mutex_unlock(&f->zbd_info->mutex);
+ if (z->wp != z->start) {
dprint(FD_ZBD, "%s: resetting zone %u\n",
f->file_name, zbd_zone_nr(f, z));
if (zbd_reset_zone(td, f, z) < 0)
* writing any data to avoid that a zone reset has to be issued while
* writing data, which causes data loss.
*/
- zbd_reset_zones(td, f, zb, ze, td->o.verify != VERIFY_NONE &&
- td->runstate != TD_VERIFYING);
+ if (td->o.verify != VERIFY_NONE && td->runstate != TD_VERIFYING)
+ zbd_reset_zones(td, f, zb, ze);
zbd_reset_write_cnt(td, f);
}
struct zoned_block_device_info *zbdi = f->zbd_info;
int i;
+ /* This function should never be called when zbdi->max_open_zones == 0 */
+ assert(zbdi->max_open_zones);
assert(td->o.job_max_open_zones == 0 || td->num_open_zones <= td->o.job_max_open_zones);
assert(td->o.job_max_open_zones <= zbdi->max_open_zones);
assert(zbdi->num_open_zones <= zbdi->max_open_zones);
uint32_t zone_idx)
{
const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
+ struct zoned_block_device_info *zbdi = f->zbd_info;
struct fio_zone_info *z = get_zone(f, zone_idx);
bool res = true;
if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
return false;
- pthread_mutex_lock(&f->zbd_info->mutex);
+ /*
+ * zbdi->max_open_zones == 0 means that there is no limit on the maximum
+ * number of open zones. In this case, do not track open zones in the
+ * zbdi->open_zones array.
+ */
+ if (!zbdi->max_open_zones)
+ return true;
+
+ pthread_mutex_lock(&zbdi->mutex);
if (is_zone_open(td, f, zone_idx)) {
/*
* If the zone is already open and going to be full by writes
if (td->o.job_max_open_zones > 0 &&
td->num_open_zones >= td->o.job_max_open_zones)
goto out;
- if (f->zbd_info->num_open_zones >= f->zbd_info->max_open_zones)
+ if (zbdi->num_open_zones >= zbdi->max_open_zones)
goto out;
dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
- f->zbd_info->open_zones[f->zbd_info->num_open_zones++] = zone_idx;
+ zbdi->open_zones[zbdi->num_open_zones++] = zone_idx;
td->num_open_zones++;
z->open = 1;
res = true;
out:
- pthread_mutex_unlock(&f->zbd_info->mutex);
+ pthread_mutex_unlock(&zbdi->mutex);
return res;
}
-/* Anything goes as long as it is not a constant. */
+/* Return random zone index for one of the open zones. */
static uint32_t pick_random_zone_idx(const struct fio_file *f,
const struct io_u *io_u)
{
- return io_u->offset * f->zbd_info->num_open_zones / f->real_file_size;
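+ /*
+ * Scale the io_u offset, taken relative to the start of the job's I/O
+ * range, into an index into the open zones array.
+ */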
+ return (io_u->offset - f->file_offset) * f->zbd_info->num_open_zones /
+ f->io_size;
+}
+
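+/* Return true if any job still has I/O units in flight */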
+static bool any_io_in_flight(void)
+{
+ struct thread_data *td;
+ int i;
+
+ for_each_td(td, i) {
+ if (td->io_u_in_flight)
+ return true;
+ }
+
+ return false;
}
/*
{
const uint32_t min_bs = td->o.min_bs[io_u->ddir];
struct fio_file *f = io_u->file;
+ struct zoned_block_device_info *zbdi = f->zbd_info;
struct fio_zone_info *z;
unsigned int open_zone_idx = -1;
uint32_t zone_idx, new_zone_idx;
int i;
bool wait_zone_close;
+ bool in_flight;
+ bool should_retry = true;
assert(is_valid_offset(f, io_u->offset));
- if (td->o.max_open_zones || td->o.job_max_open_zones) {
+ if (zbdi->max_open_zones || td->o.job_max_open_zones) {
/*
- * This statement accesses f->zbd_info->open_zones[] on purpose
+ * This statement accesses zbdi->open_zones[] on purpose
* without locking.
*/
- zone_idx = f->zbd_info->open_zones[pick_random_zone_idx(f, io_u)];
+ zone_idx = zbdi->open_zones[pick_random_zone_idx(f, io_u)];
} else {
zone_idx = zbd_zone_idx(f, io_u->offset);
}
__func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);
/*
- * Since z->mutex is the outer lock and f->zbd_info->mutex the inner
+ * Since z->mutex is the outer lock and zbdi->mutex the inner
* lock it can happen that the state of the zone with index zone_idx
- * has changed after 'z' has been assigned and before f->zbd_info->mutex
+ * has changed after 'z' has been assigned and before zbdi->mutex
* has been obtained. Hence the loop.
*/
for (;;) {
z = get_zone(f, zone_idx);
if (z->has_wp)
zone_lock(td, f, z);
- pthread_mutex_lock(&f->zbd_info->mutex);
+ pthread_mutex_lock(&zbdi->mutex);
if (z->has_wp) {
if (z->cond != ZBD_ZONE_COND_OFFLINE &&
- td->o.max_open_zones == 0 && td->o.job_max_open_zones == 0)
+ zbdi->max_open_zones == 0 && td->o.job_max_open_zones == 0)
goto examine_zone;
- if (f->zbd_info->num_open_zones == 0) {
+ if (zbdi->num_open_zones == 0) {
dprint(FD_ZBD, "%s(%s): no zones are open\n",
__func__, f->file_name);
goto open_other_zone;
* Ignore zones which don't belong to thread's offset/size area.
*/
open_zone_idx = pick_random_zone_idx(f, io_u);
- assert(open_zone_idx < f->zbd_info->num_open_zones);
+ assert(!open_zone_idx ||
+ open_zone_idx < zbdi->num_open_zones);
tmp_idx = open_zone_idx;
- for (i = 0; i < f->zbd_info->num_open_zones; i++) {
+ for (i = 0; i < zbdi->num_open_zones; i++) {
uint32_t tmpz;
- if (tmp_idx >= f->zbd_info->num_open_zones)
+ if (tmp_idx >= zbdi->num_open_zones)
tmp_idx = 0;
- tmpz = f->zbd_info->open_zones[tmp_idx];
+ tmpz = zbdi->open_zones[tmp_idx];
if (f->min_zone <= tmpz && tmpz < f->max_zone) {
open_zone_idx = tmp_idx;
goto found_candidate_zone;
dprint(FD_ZBD, "%s(%s): no candidate zone\n",
__func__, f->file_name);
- pthread_mutex_unlock(&f->zbd_info->mutex);
+ pthread_mutex_unlock(&zbdi->mutex);
if (z->has_wp)
zone_unlock(z);
return NULL;
found_candidate_zone:
- new_zone_idx = f->zbd_info->open_zones[open_zone_idx];
+ new_zone_idx = zbdi->open_zones[open_zone_idx];
if (new_zone_idx == zone_idx)
break;
zone_idx = new_zone_idx;
- pthread_mutex_unlock(&f->zbd_info->mutex);
+ pthread_mutex_unlock(&zbdi->mutex);
if (z->has_wp)
zone_unlock(z);
}
- /* Both z->mutex and f->zbd_info->mutex are held. */
+ /* Both z->mutex and zbdi->mutex are held. */
examine_zone:
if (z->wp + min_bs <= zbd_zone_capacity_end(z)) {
- pthread_mutex_unlock(&f->zbd_info->mutex);
+ pthread_mutex_unlock(&zbdi->mutex);
goto out;
}
open_other_zone:
/* Check if number of open zones reaches one of limits. */
wait_zone_close =
- f->zbd_info->num_open_zones == f->max_zone - f->min_zone ||
- (td->o.max_open_zones &&
- f->zbd_info->num_open_zones == td->o.max_open_zones) ||
+ zbdi->num_open_zones == f->max_zone - f->min_zone ||
+ (zbdi->max_open_zones &&
+ zbdi->num_open_zones == zbdi->max_open_zones) ||
(td->o.job_max_open_zones &&
td->num_open_zones == td->o.job_max_open_zones);
- pthread_mutex_unlock(&f->zbd_info->mutex);
+ pthread_mutex_unlock(&zbdi->mutex);
/* Only z->mutex is held. */
io_u_quiesce(td);
}
+retry:
/* Zone 'z' is full, so try to open a new zone. */
- for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
+ for (i = f->io_size / zbdi->zone_size; i > 0; i--) {
zone_idx++;
if (z->has_wp)
zone_unlock(z);
/* Only z->mutex is held. */
/* Check whether the write fits in any of the already opened zones. */
- pthread_mutex_lock(&f->zbd_info->mutex);
- for (i = 0; i < f->zbd_info->num_open_zones; i++) {
- zone_idx = f->zbd_info->open_zones[i];
+ pthread_mutex_lock(&zbdi->mutex);
+ for (i = 0; i < zbdi->num_open_zones; i++) {
+ zone_idx = zbdi->open_zones[i];
if (zone_idx < f->min_zone || zone_idx >= f->max_zone)
continue;
- pthread_mutex_unlock(&f->zbd_info->mutex);
+ pthread_mutex_unlock(&zbdi->mutex);
zone_unlock(z);
z = get_zone(f, zone_idx);
zone_lock(td, f, z);
if (z->wp + min_bs <= zbd_zone_capacity_end(z))
goto out;
- pthread_mutex_lock(&f->zbd_info->mutex);
+ pthread_mutex_lock(&zbdi->mutex);
}
- pthread_mutex_unlock(&f->zbd_info->mutex);
+
+ /*
+ * If any I/O is in flight, or if all previously in-flight I/Os have just
+ * completed, the completions may have closed zones, so retry the steps
+ * to open a zone. Before retrying, call io_u_quiesce() to complete the
+ * in-flight writes.
+ */
+ in_flight = any_io_in_flight();
+ if (in_flight || should_retry) {
+ dprint(FD_ZBD, "%s(%s): wait zone close and retry open zones\n",
+ __func__, f->file_name);
+ pthread_mutex_unlock(&zbdi->mutex);
+ zone_unlock(z);
+ io_u_quiesce(td);
+ zone_lock(td, f, z);
+ should_retry = in_flight;
+ goto retry;
+ }
+
+ pthread_mutex_unlock(&zbdi->mutex);
zone_unlock(z);
dprint(FD_ZBD, "%s(%s): did not open another zone\n", __func__,
f->file_name);
}
io_u->offset = z->start + z->verify_block * min_bs;
if (io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
- log_err("%s: %llu + %llu >= %lu\n", f->file_name, io_u->offset,
- io_u->buflen, zbd_zone_capacity_end(z));
+ log_err("%s: %llu + %llu >= %llu\n", f->file_name, io_u->offset,
+ io_u->buflen, (unsigned long long) zbd_zone_capacity_end(z));
assert(false);
}
z->verify_block += io_u->buflen / min_bs;
}
/*
- * Find another zone for which @io_u fits in the readable data in the zone.
- * Search in zones @zb + 1 .. @zl. For random workload, also search in zones
- * @zb - 1 .. @zf.
+ * Find another zone which has @min_bytes of readable data. Search in zones
+ * @zb + 1 .. @zl. For random workload, also search in zones @zb - 1 .. @zf.
*
* Either returns NULL or returns a zone pointer. When the zone has write
* pointer, hold the mutex for the zone.
*/
static struct fio_zone_info *
-zbd_find_zone(struct thread_data *td, struct io_u *io_u,
+zbd_find_zone(struct thread_data *td, struct io_u *io_u, uint32_t min_bytes,
struct fio_zone_info *zb, struct fio_zone_info *zl)
{
- const uint32_t min_bs = td->o.min_bs[io_u->ddir];
struct fio_file *f = io_u->file;
struct fio_zone_info *z1, *z2;
const struct fio_zone_info *const zf = get_zone(f, f->min_zone);
if (z1 < zl && z1->cond != ZBD_ZONE_COND_OFFLINE) {
if (z1->has_wp)
zone_lock(td, f, z1);
- if (z1->start + min_bs <= z1->wp)
+ if (z1->start + min_bytes <= z1->wp)
return z1;
if (z1->has_wp)
zone_unlock(z1);
z2->cond != ZBD_ZONE_COND_OFFLINE) {
if (z2->has_wp)
zone_lock(td, f, z2);
- if (z2->start + min_bs <= z2->wp)
+ if (z2->start + min_bytes <= z2->wp)
return z2;
if (z2->has_wp)
zone_unlock(z2);
}
}
- dprint(FD_ZBD, "%s: adjusting random read offset failed\n",
- f->file_name);
+ dprint(FD_ZBD, "%s: no zone has %d bytes of readable data\n",
+ f->file_name, min_bytes);
return NULL;
}
uint32_t zone_idx;
uint64_t zone_end;
- if (!zbd_info)
- return;
+ assert(zbd_info);
zone_idx = zbd_zone_idx(f, io_u->offset);
assert(zone_idx < zbd_info->nr_zones);
pthread_mutex_unlock(&zbd_info->mutex);
z->wp = zone_end;
break;
- case DDIR_TRIM:
- assert(z->wp == z->start);
- break;
default:
break;
}
struct fio_zone_info *z;
uint32_t zone_idx;
- if (!zbd_info)
- return;
+ assert(zbd_info);
zone_idx = zbd_zone_idx(f, io_u->offset);
assert(zone_idx < zbd_info->nr_zones);
assert(td->o.zone_mode == ZONE_MODE_ZBD);
assert(td->o.zone_size);
+ assert(f->zbd_info);
zone_idx = zbd_zone_idx(f, f->last_pos[ddir]);
z = get_zone(f, zone_idx);
* devices with all empty zones. Overwrite the first I/O direction as
* write to make sure data to read exists.
*/
+ assert(io_u->file->zbd_info);
if (ddir != DDIR_READ || !td_rw(td))
return ddir;
enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
{
struct fio_file *f = io_u->file;
+ struct zoned_block_device_info *zbdi = f->zbd_info;
uint32_t zone_idx_b;
struct fio_zone_info *zb, *zl, *orig_zb;
uint32_t orig_len = io_u->buflen;
uint64_t new_len;
int64_t range;
- if (!f->zbd_info)
- return io_u_accept;
-
+ assert(zbdi);
assert(min_bs);
assert(is_valid_offset(f, io_u->offset));
assert(io_u->buflen);
if (io_u->offset + min_bs > (zb + 1)->start) {
dprint(FD_IO,
- "%s: off=%llu + min_bs=%u > next zone %lu\n",
- f->file_name, io_u->offset, min_bs,
- (zb + 1)->start);
+ "%s: off=%llu + min_bs=%u > next zone %llu\n",
+ f->file_name, io_u->offset,
+ min_bs, (unsigned long long) (zb + 1)->start);
io_u->offset = zb->start + (zb + 1)->start - io_u->offset;
new_len = min(io_u->buflen, (zb + 1)->start - io_u->offset);
} else {
case DDIR_READ:
if (td->runstate == TD_VERIFYING && td_write(td)) {
zb = zbd_replay_write_order(td, io_u, zb);
- /*
- * Since we return with the zone lock still held,
- * add an annotation to let Coverity know that it
- * is intentional.
- */
- /* coverity[missing_unlock] */
goto accept;
}
/*
((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
zone_unlock(zb);
zl = get_zone(f, f->max_zone);
- zb = zbd_find_zone(td, io_u, zb, zl);
+ zb = zbd_find_zone(td, io_u, min_bs, zb, zl);
if (!zb) {
dprint(FD_ZBD,
"%s: zbd_find_zone(%lld, %llu) failed\n",
assert(io_u->offset + io_u->buflen <= zb->wp);
goto accept;
case DDIR_WRITE:
- if (io_u->buflen > f->zbd_info->zone_size)
+ if (io_u->buflen > zbdi->zone_size) {
+ td_verror(td, EINVAL, "I/O buflen exceeds zone size");
+ dprint(FD_IO,
+ "%s: I/O buflen %llu exceeds zone size %llu\n",
+ f->file_name, io_u->buflen,
+ (unsigned long long) zbdi->zone_size);
goto eof;
+ }
if (!zbd_open_zone(td, f, zone_idx_b)) {
zone_unlock(zb);
zb = zbd_convert_to_open_zone(td, io_u);
- if (!zb)
+ if (!zb) {
+ dprint(FD_IO, "%s: can't convert to open zone",
+ f->file_name);
goto eof;
- zone_idx_b = zbd_zone_nr(f, zb);
+ }
}
/* Check whether the zone reset threshold has been exceeded */
if (td->o.zrf.u.f) {
- if (f->zbd_info->wp_sectors_with_data >=
+ if (zbdi->wp_sectors_with_data >=
f->io_size * td->o.zrt.u.f &&
zbd_dec_and_reset_write_cnt(td, f)) {
zb->reset_zone = 1;
goto eof;
if (zb->capacity < min_bs) {
+ td_verror(td, EINVAL, "ZCAP is less min_bs");
log_err("zone capacity %llu smaller than minimum block size %d\n",
(unsigned long long)zb->capacity,
min_bs);
assert(!zbd_zone_full(f, zb, min_bs));
io_u->offset = zb->wp;
if (!is_valid_offset(f, io_u->offset)) {
- dprint(FD_ZBD, "Dropped request with offset %llu\n",
- io_u->offset);
+ td_verror(td, EINVAL, "invalid WP value");
+ dprint(FD_ZBD, "%s: dropped request with offset %llu\n",
+ f->file_name, io_u->offset);
goto eof;
}
/*
orig_len, io_u->buflen);
goto accept;
}
- log_err("Zone remainder %lld smaller than minimum block size %d\n",
- (zbd_zone_capacity_end(zb) - io_u->offset),
- min_bs);
+ td_verror(td, EIO, "zone remainder too small");
+ log_err("zone remainder %lld smaller than min block size %d\n",
+ (zbd_zone_capacity_end(zb) - io_u->offset), min_bs);
goto eof;
case DDIR_TRIM:
- /* fall-through */
+ /* Check that random trim targets a non-empty zone */
+ if (!td_random(td) || zb->wp > zb->start)
+ goto accept;
+
+ /* Find out a non-empty zone to trim */
+ zone_unlock(zb);
+ zl = get_zone(f, f->max_zone);
+ zb = zbd_find_zone(td, io_u, 1, zb, zl);
+ if (zb) {
+ io_u->offset = zb->start;
+ dprint(FD_ZBD, "%s: found new zone(%lld) for trim\n",
+ f->file_name, io_u->offset);
+ goto accept;
+ }
+ goto eof;
case DDIR_SYNC:
+ /* fall-through */
case DDIR_DATASYNC:
case DDIR_SYNC_FILE_RANGE:
case DDIR_WAIT:
assert(!io_u->zbd_put_io);
io_u->zbd_queue_io = zbd_queue_io;
io_u->zbd_put_io = zbd_put_io;
+ /*
+ * Since we return with the zone lock still held,
+ * add an annotation to let Coverity know that it
+ * is intentional.
+ */
+ /* coverity[missing_unlock] */
return io_u_accept;
eof:
return NULL;
return res;
}
+
+/**
+ * zbd_do_io_u_trim - If a zone reset is applicable, reset the zone instead of trimming
+ *
+ * @td: FIO thread data.
+ * @io_u: FIO I/O unit.
+ *
+ * It is assumed that z->mutex is already locked.
+ * Returns io_u_completed when the zone reset succeeds, 0 when the target zone
+ * does not have a write pointer, and a negative errno on error.
+ */
+int zbd_do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+ struct fio_zone_info *z;
+ uint32_t zone_idx;
+ int ret;
+
+ zone_idx = zbd_zone_idx(f, io_u->offset);
+ z = get_zone(f, zone_idx);
+
+ if (!z->has_wp)
+ return 0;
+
+ if (io_u->offset != z->start) {
+ log_err("Trim offset not at zone start (%lld)\n", io_u->offset);
+ return -EINVAL;
+ }
+
+ ret = zbd_reset_zone((struct thread_data *)td, f, z);
+ if (ret < 0)
+ return ret;
+
+ return io_u_completed;
+}