* size of @buf.
*
* Returns 0 upon success and a negative error code upon failure.
+ * If the zone report is empty, always assume an error (device problem) and
+ * return -EIO.
*/
static int read_zone_info(int fd, uint64_t start_sector,
			  void *buf, unsigned int bufsz)
{
	struct blk_zone_report *hdr = buf;
	int ret;

	/* @buf must at least hold the report header. */
	if (bufsz < sizeof(*hdr))
		return -EINVAL;

	/* Tell the kernel how many zone descriptors fit after the header. */
	hdr->nr_zones = (bufsz - sizeof(*hdr)) / sizeof(struct blk_zone);
	hdr->sector = start_sector;
	ret = ioctl(fd, BLKREPORTZONE, hdr);
	/*
	 * ioctl() failure is signalled by a negative return with errno set;
	 * checking "ret != 0" would misreport a positive (successful) return
	 * as an error and hand back a stale errno value.
	 */
	if (ret < 0)
		return -errno;
	/* An empty report indicates a device problem; map it to -EIO. */
	if (!hdr->nr_zones)
		return -EIO;
	return 0;
}
/*
char *zoned_attr_path = NULL;
char *model_str = NULL;
struct stat statbuf;
+ char *sys_devno_path = NULL;
+ char *part_attr_path = NULL;
+ char *part_str = NULL;
+ char sys_path[PATH_MAX];
+ ssize_t sz;
+ char *delim = NULL;
if (stat(file_name, &statbuf) < 0)
goto out;
- if (asprintf(&zoned_attr_path, "/sys/dev/block/%d:%d/queue/zoned",
+
+ if (asprintf(&sys_devno_path, "/sys/dev/block/%d:%d",
major(statbuf.st_rdev), minor(statbuf.st_rdev)) < 0)
goto out;
+
+ sz = readlink(sys_devno_path, sys_path, sizeof(sys_path) - 1);
+ if (sz < 0)
+ goto out;
+ sys_path[sz] = '\0';
+
+ /*
+ * If the device is a partition device, cut the device name in the
+ * canonical sysfs path to obtain the sysfs path of the holder device.
+ * e.g.: /sys/devices/.../sda/sda1 -> /sys/devices/.../sda
+ */
+ if (asprintf(&part_attr_path, "/sys/dev/block/%s/partition",
+ sys_path) < 0)
+ goto out;
+ part_str = read_file(part_attr_path);
+ if (part_str && *part_str == '1') {
+ delim = strrchr(sys_path, '/');
+ if (!delim)
+ goto out;
+ *delim = '\0';
+ }
+
+ if (asprintf(&zoned_attr_path,
+ "/sys/dev/block/%s/queue/zoned", sys_path) < 0)
+ goto out;
+
model_str = read_file(zoned_attr_path);
if (!model_str)
goto out;
out:
free(model_str);
free(zoned_attr_path);
+ free(part_str);
+ free(part_attr_path);
+ free(sys_devno_path);
return model;
}
p->start = z->start << 9;
switch (z->cond) {
case BLK_ZONE_COND_NOT_WP:
- p->wp = p->start;
- break;
case BLK_ZONE_COND_FULL:
p->wp = p->start + zone_size;
break;
*
* Returns 0 upon success and a negative error code upon failure.
*/
-int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
+static int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
{
enum blk_zoned_model zbd_model;
int ret = 0;
* a multiple of the fio block size. The caller must neither hold z->mutex
* nor f->zbd_info->mutex. Returns with z->mutex held upon success.
*/
-struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
- struct io_u *io_u)
+static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
+ struct io_u *io_u)
{
const uint32_t min_bs = td->o.min_bs[io_u->ddir];
const struct fio_file *f = io_u->file;
return NULL;
}
-
/**
- * zbd_post_submit - update the write pointer and unlock the zone lock
+ * zbd_queue_io - update the write pointer of a sequential zone
* @io_u: I/O unit
- * @success: Whether or not the I/O unit has been executed successfully
+ * @success: Whether or not the I/O unit has been queued successfully
+ * @q: queueing status (busy, completed or queued).
*
- * For write and trim operations, update the write pointer of all affected
- * zones.
+ * For write and trim operations, update the write pointer of the I/O unit
+ * target zone.
*/
-static void zbd_post_submit(const struct io_u *io_u, bool success)
+static void zbd_queue_io(struct io_u *io_u, int q, bool success)
{
- struct zoned_block_device_info *zbd_info;
+ const struct fio_file *f = io_u->file;
+ struct zoned_block_device_info *zbd_info = f->zbd_info;
struct fio_zone_info *z;
uint32_t zone_idx;
- uint64_t end, zone_end;
+ uint64_t zone_end;
- zbd_info = io_u->file->zbd_info;
if (!zbd_info)
return;
- zone_idx = zbd_zone_idx(io_u->file, io_u->offset);
- end = io_u->offset + io_u->buflen;
- z = &zbd_info->zone_info[zone_idx];
+ zone_idx = zbd_zone_idx(f, io_u->offset);
assert(zone_idx < zbd_info->nr_zones);
+ z = &zbd_info->zone_info[zone_idx];
+
if (z->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return;
+
if (!success)
goto unlock;
+
+ dprint(FD_ZBD,
+ "%s: queued I/O (%lld, %llu) for zone %u\n",
+ f->file_name, io_u->offset, io_u->buflen, zone_idx);
+
switch (io_u->ddir) {
case DDIR_WRITE:
- zone_end = min(end, (z + 1)->start);
+ zone_end = min((uint64_t)(io_u->offset + io_u->buflen),
+ (z + 1)->start);
pthread_mutex_lock(&zbd_info->mutex);
/*
* z->wp > zone_end means that one or more I/O errors
default:
break;
}
+
unlock:
- pthread_mutex_unlock(&z->mutex);
+ if (!success || q != FIO_Q_QUEUED) {
+ /* BUSY or COMPLETED: unlock the zone */
+ pthread_mutex_unlock(&z->mutex);
+ io_u->zbd_put_io = NULL;
+ }
+}
- zbd_check_swd(io_u->file);
+/**
+ * zbd_put_io - Unlock an I/O unit target zone lock
+ * @io_u: I/O unit
+ */
+static void zbd_put_io(const struct io_u *io_u)
+{
+ const struct fio_file *f = io_u->file;
+ struct zoned_block_device_info *zbd_info = f->zbd_info;
+ struct fio_zone_info *z;
+ uint32_t zone_idx;
+
+ if (!zbd_info)
+ return;
+
+ zone_idx = zbd_zone_idx(f, io_u->offset);
+ assert(zone_idx < zbd_info->nr_zones);
+ z = &zbd_info->zone_info[zone_idx];
+
+ if (z->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
+ return;
+
+ dprint(FD_ZBD,
+ "%s: terminate I/O (%lld, %llu) for zone %u\n",
+ f->file_name, io_u->offset, io_u->buflen, zone_idx);
+
+ assert(pthread_mutex_unlock(&z->mutex) == 0);
+ zbd_check_swd(f);
}
bool zbd_unaligned_write(int error_code)
zbd_check_swd(f);
- pthread_mutex_lock(&zb->mutex);
+ /*
+ * Lock the io_u target zone. The zone will be unlocked if io_u offset
+ * is changed or when io_u completes and zbd_put_io() executed.
+ * To avoid multiple jobs doing asynchronous I/Os from deadlocking each
+ * other waiting for zone locks when building an io_u batch, first
+ * only trylock the zone. If the zone is already locked by another job,
+ * process the currently queued I/Os so that I/O progress is made and
+ * zones unlocked.
+ */
+ if (pthread_mutex_trylock(&zb->mutex) != 0) {
+ if (!td_ioengine_flagged(td, FIO_SYNCIO))
+ io_u_quiesce(td);
+ pthread_mutex_lock(&zb->mutex);
+ }
+
switch (io_u->ddir) {
case DDIR_READ:
if (td->runstate == TD_VERIFYING) {
accept:
assert(zb);
assert(zb->cond != BLK_ZONE_COND_OFFLINE);
- assert(!io_u->post_submit);
- io_u->post_submit = zbd_post_submit;
+ assert(!io_u->zbd_queue_io);
+ assert(!io_u->zbd_put_io);
+ io_u->zbd_queue_io = zbd_queue_io;
+ io_u->zbd_put_io = zbd_put_io;
return io_u_accept;
eof: