 * size of @buf.
 *
 * Returns 0 upon success and a negative error code upon failure.
 * If the zone report is empty, always assume an error (device problem) and
 * return -EIO.
 */
static int read_zone_info(int fd, uint64_t start_sector,
			  void *buf, unsigned int bufsz)
{
	struct blk_zone_report *hdr = buf;
	int ret;

	/* @buf must at least hold the report header itself. */
	if (bufsz < sizeof(*hdr))
		return -EINVAL;

	/* Tell the kernel how many zone descriptors fit after the header. */
	hdr->nr_zones = (bufsz - sizeof(*hdr)) / sizeof(struct blk_zone);
	hdr->sector = start_sector;
	/*
	 * ioctl() reports failure with a negative return value only; do not
	 * treat a positive return as an error, since errno would then be
	 * stale or zero.
	 */
	ret = ioctl(fd, BLKREPORTZONE, hdr);
	if (ret < 0)
		return -errno;
	/* An empty zone report indicates a device problem. */
	if (!hdr->nr_zones)
		return -EIO;
	return 0;
}
/*
{
uint32_t nr_zones;
struct fio_zone_info *p;
- uint64_t zone_size;
+ uint64_t zone_size = td->o.zone_size;
struct zoned_block_device_info *zbd_info = NULL;
pthread_mutexattr_t attr;
int i;
- zone_size = td->o.zone_size;
- assert(zone_size);
+ if (zone_size == 0) {
+ log_err("%s: Specifying the zone size is mandatory for regular block devices with --zonemode=zbd\n\n",
+ f->file_name);
+ return 1;
+ }
+
+ if (zone_size < 512) {
+ log_err("%s: zone size must be at least 512 bytes for --zonemode=zbd\n\n",
+ f->file_name);
+ return 1;
+ }
+
nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
zbd_info = scalloc(1, sizeof(*zbd_info) +
(nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
p->start = z->start << 9;
switch (z->cond) {
case BLK_ZONE_COND_NOT_WP:
- p->wp = p->start;
- break;
case BLK_ZONE_COND_FULL:
p->wp = p->start + zone_size;
break;
*
* Returns 0 upon success and a negative error code upon failure.
*/
-int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
+static int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
{
enum blk_zoned_model zbd_model;
int ret = 0;
ret = zbd_create_zone_info(td, file);
if (ret < 0)
- td_verror(td, -ret, "BLKREPORTZONE failed");
+ td_verror(td, -ret, "zbd_create_zone_info() failed");
return ret;
}
for_each_file(td, f, i) {
if (f->filetype != FIO_TYPE_BLOCK)
continue;
- if (td->o.zone_size && td->o.zone_size < 512) {
- log_err("%s: zone size must be at least 512 bytes for --zonemode=zbd\n\n",
- f->file_name);
+ if (zbd_init_zone_info(td, f))
return 1;
- }
- if (td->o.zone_size == 0 &&
- get_zbd_model(f->file_name) == ZBD_DM_NONE) {
- log_err("%s: Specifying the zone size is mandatory for regular block devices with --zonemode=zbd\n\n",
- f->file_name);
- return 1;
- }
- zbd_init_zone_info(td, f);
}
if (!zbd_using_direct_io()) {
* a multiple of the fio block size. The caller must neither hold z->mutex
* nor f->zbd_info->mutex. Returns with z->mutex held upon success.
*/
-struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
- struct io_u *io_u)
+static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
+ struct io_u *io_u)
{
const uint32_t min_bs = td->o.min_bs[io_u->ddir];
const struct fio_file *f = io_u->file;
zbd_check_swd(f);
- pthread_mutex_lock(&zb->mutex);
+ /*
+ * Lock the io_u target zone. The zone will be unlocked if io_u offset
+ * is changed or when io_u completes and zbd_put_io() executed.
+ * To avoid multiple jobs doing asynchronous I/Os from deadlocking each
+ * other waiting for zone locks when building an io_u batch, first
+ * only trylock the zone. If the zone is already locked by another job,
+ * process the currently queued I/Os so that I/O progress is made and
+ * zones unlocked.
+ */
+ if (pthread_mutex_trylock(&zb->mutex) != 0) {
+ if (!td_ioengine_flagged(td, FIO_SYNCIO))
+ io_u_quiesce(td);
+ pthread_mutex_lock(&zb->mutex);
+ }
+
switch (io_u->ddir) {
case DDIR_READ:
if (td->runstate == TD_VERIFYING) {