{
uint32_t zone_idx;
- if (f->zbd_info->zone_size_log2)
+ if (f->zbd_info->zone_size_log2 > 0)
zone_idx = offset >> f->zbd_info->zone_size_log2;
else
- zone_idx = (offset >> 9) / f->zbd_info->zone_size;
+ zone_idx = offset / f->zbd_info->zone_size;
return min(zone_idx, f->zbd_info->nr_zones);
}
assert((required & 511) == 0);
return z->type == BLK_ZONE_TYPE_SEQWRITE_REQ &&
- z->wp + (required >> 9) > z->start + f->zbd_info->zone_size;
+ z->wp + required > z->start + f->zbd_info->zone_size;
}
static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
continue;
zone_idx = zbd_zone_idx(f, f->file_offset);
z = &f->zbd_info->zone_info[zone_idx];
- if (f->file_offset != (z->start << 9)) {
- new_offset = (z+1)->start << 9;
+ if (f->file_offset != z->start) {
+ new_offset = (z+1)->start;
if (new_offset >= f->file_offset + f->io_size) {
log_info("%s: io_size must be at least one zone\n",
f->file_name);
return false;
}
- log_info("%s: rounded up offset from %lu to %lu\n",
- f->file_name, f->file_offset,
- new_offset);
+ log_info("%s: rounded up offset from %llu to %llu\n",
+ f->file_name, (unsigned long long) f->file_offset,
+ (unsigned long long) new_offset);
f->io_size -= (new_offset - f->file_offset);
f->file_offset = new_offset;
}
zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
z = &f->zbd_info->zone_info[zone_idx];
- new_end = z->start << 9;
+ new_end = z->start;
if (f->file_offset + f->io_size != new_end) {
if (new_end <= f->file_offset) {
log_info("%s: io_size must be at least one zone\n",
f->file_name);
return false;
}
- log_info("%s: rounded down io_size from %lu to %lu\n",
- f->file_name, f->io_size,
- new_end - f->file_offset);
+ log_info("%s: rounded down io_size from %llu to %llu\n",
+ f->file_name, (unsigned long long) f->io_size,
+ (unsigned long long) (new_end - f->file_offset));
f->io_size = new_end - f->file_offset;
}
}
zone_size = f->zbd_info->zone_size;
for (k = 0; k < ARRAY_SIZE(td->o.bs); k++) {
if (td->o.verify != VERIFY_NONE &&
- (zone_size << 9) % td->o.bs[k] != 0) {
+ zone_size % td->o.bs[k] != 0) {
log_info("%s: block size %llu is not a divisor of the zone size %d\n",
f->file_name, td->o.bs[k],
- zone_size << 9);
+ zone_size);
return false;
}
}
* size of @buf.
*
* Returns 0 upon success and a negative error code upon failure.
+ * If the zone report is empty, always assume an error (device problem) and
+ * return -EIO.
*/
static int read_zone_info(int fd, uint64_t start_sector,
void *buf, unsigned int bufsz)
{
struct blk_zone_report *hdr = buf;
+ int ret;
if (bufsz < sizeof(*hdr))
return -EINVAL;
hdr->nr_zones = (bufsz - sizeof(*hdr)) / sizeof(struct blk_zone);
hdr->sector = start_sector;
- return ioctl(fd, BLKREPORTZONE, hdr) >= 0 ? 0 : -errno;
+ ret = ioctl(fd, BLKREPORTZONE, hdr);
+ if (ret)
+ return -errno;
+ if (!hdr->nr_zones)
+ return -EIO;
+ return 0;
}
/*
char *zoned_attr_path = NULL;
char *model_str = NULL;
struct stat statbuf;
+ char *sys_devno_path = NULL;
+ char *part_attr_path = NULL;
+ char *part_str = NULL;
+ char sys_path[PATH_MAX];
+ ssize_t sz;
+ char *delim = NULL;
if (stat(file_name, &statbuf) < 0)
goto out;
- if (asprintf(&zoned_attr_path, "/sys/dev/block/%d:%d/queue/zoned",
+
+ if (asprintf(&sys_devno_path, "/sys/dev/block/%d:%d",
major(statbuf.st_rdev), minor(statbuf.st_rdev)) < 0)
goto out;
+
+ sz = readlink(sys_devno_path, sys_path, sizeof(sys_path) - 1);
+ if (sz < 0)
+ goto out;
+ sys_path[sz] = '\0';
+
+ /*
+ * If the device is a partition device, cut the device name in the
+ * canonical sysfs path to obtain the sysfs path of the holder device.
+ * e.g.: /sys/devices/.../sda/sda1 -> /sys/devices/.../sda
+ */
+ if (asprintf(&part_attr_path, "/sys/dev/block/%s/partition",
+ sys_path) < 0)
+ goto out;
+ part_str = read_file(part_attr_path);
+ if (part_str && *part_str >= '1' && *part_str <= '9') {
+ delim = strrchr(sys_path, '/');
+ if (!delim)
+ goto out;
+ *delim = '\0';
+ }
+
+ if (asprintf(&zoned_attr_path,
+ "/sys/dev/block/%s/queue/zoned", sys_path) < 0)
+ goto out;
+
model_str = read_file(zoned_attr_path);
if (!model_str)
goto out;
out:
free(model_str);
free(zoned_attr_path);
+ free(part_str);
+ free(part_attr_path);
+ free(sys_devno_path);
return model;
}
pthread_mutexattr_t attr;
int i;
- zone_size = td->o.zone_size >> 9;
+ zone_size = td->o.zone_size;
assert(zone_size);
- nr_zones = ((f->real_file_size >> 9) + zone_size - 1) / zone_size;
+ nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
zbd_info = scalloc(1, sizeof(*zbd_info) +
(nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
if (!zbd_info)
f->zbd_info = zbd_info;
f->zbd_info->zone_size = zone_size;
f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
- ilog2(zone_size) + 9 : -1;
+ ilog2(zone_size) : -1;
f->zbd_info->nr_zones = nr_zones;
pthread_mutexattr_destroy(&attr);
return 0;
goto close;
}
z = (void *)(hdr + 1);
- zone_size = z->len;
- nr_zones = ((f->real_file_size >> 9) + zone_size - 1) / zone_size;
+ zone_size = z->len << 9;
+ nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
if (td->o.zone_size == 0) {
- td->o.zone_size = zone_size << 9;
- } else if (td->o.zone_size != zone_size << 9) {
- log_info("fio: %s job parameter zonesize %lld does not match disk zone size %ld.\n",
- f->file_name, td->o.zone_size, zone_size << 9);
+ td->o.zone_size = zone_size;
+ } else if (td->o.zone_size != zone_size) {
+ log_info("fio: %s job parameter zonesize %llu does not match disk zone size %llu.\n",
+ f->file_name, (unsigned long long) td->o.zone_size,
+ (unsigned long long) zone_size);
ret = -EINVAL;
goto close;
}
- dprint(FD_ZBD, "Device %s has %d zones of size %lu KB\n", f->file_name,
- nr_zones, zone_size / 2);
+ dprint(FD_ZBD, "Device %s has %d zones of size %llu KB\n", f->file_name,
+ nr_zones, (unsigned long long) zone_size / 1024);
zbd_info = scalloc(1, sizeof(*zbd_info) +
(nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
z = (void *)(hdr + 1);
for (i = 0; i < hdr->nr_zones; i++, j++, z++, p++) {
pthread_mutex_init(&p->mutex, &attr);
- p->start = z->start;
+ p->start = z->start << 9;
switch (z->cond) {
case BLK_ZONE_COND_NOT_WP:
- p->wp = z->start;
- break;
case BLK_ZONE_COND_FULL:
- p->wp = z->start + zone_size;
+ p->wp = p->start + zone_size;
break;
default:
assert(z->start <= z->wp);
- assert(z->wp <= z->start + zone_size);
- p->wp = z->wp;
+ assert(z->wp <= z->start + (zone_size >> 9));
+ p->wp = z->wp << 9;
break;
}
p->type = z->type;
break;
ret = read_zone_info(fd, start_sector, buf, bufsz);
if (ret < 0) {
- log_info("fio: BLKREPORTZONE(%lu) failed for %s (%d).\n",
- start_sector, f->file_name, -ret);
+ log_info("fio: BLKREPORTZONE(%llu) failed for %s (%d).\n",
+ (unsigned long long) start_sector, f->file_name, -ret);
goto close;
}
}
/* a sentinel */
- zbd_info->zone_info[nr_zones].start = start_sector;
+ zbd_info->zone_info[nr_zones].start = start_sector << 9;
f->zbd_info = zbd_info;
f->zbd_info->zone_size = zone_size;
f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
- ilog2(zone_size) + 9 : -1;
+ ilog2(zone_size) : -1;
f->zbd_info->nr_zones = nr_zones;
zbd_info = NULL;
ret = 0;
*
* Returns 0 upon success and a negative error code upon failure.
*/
-int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
+static int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
{
enum blk_zoned_model zbd_model;
int ret = 0;
* Returns 0 upon success and a negative error code upon failure.
*/
static int zbd_reset_range(struct thread_data *td, const struct fio_file *f,
- uint64_t sector, uint64_t nr_sectors)
+ uint64_t offset, uint64_t length)
{
struct blk_zone_range zr = {
- .sector = sector,
- .nr_sectors = nr_sectors,
+ .sector = offset >> 9,
+ .nr_sectors = length >> 9,
};
uint32_t zone_idx_b, zone_idx_e;
struct fio_zone_info *zb, *ze, *z;
int ret = 0;
assert(f->fd != -1);
- assert(is_valid_offset(f, ((sector + nr_sectors) << 9) - 1));
+ assert(is_valid_offset(f, offset + length - 1));
switch (f->zbd_info->model) {
case ZBD_DM_HOST_AWARE:
case ZBD_DM_HOST_MANAGED:
break;
}
- zone_idx_b = zbd_zone_idx(f, sector << 9);
+ zone_idx_b = zbd_zone_idx(f, offset);
zb = &f->zbd_info->zone_info[zone_idx_b];
- zone_idx_e = zbd_zone_idx(f, (sector + nr_sectors) << 9);
+ zone_idx_e = zbd_zone_idx(f, offset + length);
ze = &f->zbd_info->zone_info[zone_idx_e];
for (z = zb; z < ze; z++) {
pthread_mutex_lock(&z->mutex);
return ret;
}
+static unsigned int zbd_zone_nr(struct zoned_block_device_info *zbd_info,
+ struct fio_zone_info *zone)
+{
+ return zone - zbd_info->zone_info;
+}
+
/**
* zbd_reset_zone - reset the write pointer of a single zone
* @td: FIO thread data.
static int zbd_reset_zone(struct thread_data *td, const struct fio_file *f,
struct fio_zone_info *z)
{
- int ret;
+ dprint(FD_ZBD, "%s: resetting wp of zone %u.\n", f->file_name,
+ zbd_zone_nr(f->zbd_info, z));
- dprint(FD_ZBD, "%s: resetting wp of zone %lu.\n", f->file_name,
- z - f->zbd_info->zone_info);
- ret = zbd_reset_range(td, f, z->start, (z+1)->start - z->start);
- return ret;
+ return zbd_reset_range(td, f, z->start, (z+1)->start - z->start);
}
/*
struct fio_zone_info *const ze, bool all_zones)
{
struct fio_zone_info *z, *start_z = ze;
- const uint32_t min_bs = td->o.min_bs[DDIR_WRITE] >> 9;
+ const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
bool reset_wp;
int res = 0;
- dprint(FD_ZBD, "%s: examining zones %lu .. %lu\n", f->file_name,
- zb - f->zbd_info->zone_info, ze - f->zbd_info->zone_info);
+ dprint(FD_ZBD, "%s: examining zones %u .. %u\n", f->file_name,
+ zbd_zone_nr(f->zbd_info, zb), zbd_zone_nr(f->zbd_info, ze));
assert(f->fd != -1);
for (z = zb; z < ze; z++) {
pthread_mutex_lock(&z->mutex);
start_z = z;
} else if (start_z < ze && !reset_wp) {
dprint(FD_ZBD,
- "%s: resetting zones %lu .. %lu\n",
+ "%s: resetting zones %u .. %u\n",
f->file_name,
- start_z - f->zbd_info->zone_info,
- z - f->zbd_info->zone_info);
+ zbd_zone_nr(f->zbd_info, start_z),
+ zbd_zone_nr(f->zbd_info, z));
if (zbd_reset_range(td, f, start_z->start,
z->start - start_z->start) < 0)
res = 1;
default:
if (start_z == ze)
break;
- dprint(FD_ZBD, "%s: resetting zones %lu .. %lu\n",
- f->file_name, start_z - f->zbd_info->zone_info,
- z - f->zbd_info->zone_info);
+ dprint(FD_ZBD, "%s: resetting zones %u .. %u\n",
+ f->file_name, zbd_zone_nr(f->zbd_info, start_z),
+ zbd_zone_nr(f->zbd_info, z));
if (zbd_reset_range(td, f, start_z->start,
z->start - start_z->start) < 0)
res = 1;
}
}
if (start_z < ze) {
- dprint(FD_ZBD, "%s: resetting zones %lu .. %lu\n", f->file_name,
- start_z - f->zbd_info->zone_info,
- z - f->zbd_info->zone_info);
+ dprint(FD_ZBD, "%s: resetting zones %u .. %u\n", f->file_name,
+ zbd_zone_nr(f->zbd_info, start_z),
+ zbd_zone_nr(f->zbd_info, z));
if (zbd_reset_range(td, f, start_z->start,
z->start - start_z->start) < 0)
res = 1;
return write_cnt == 0;
}
-/* Check whether the value of zbd_info.sectors_with_data is correct. */
-static void check_swd(const struct thread_data *td, const struct fio_file *f)
+enum swd_action {
+ CHECK_SWD,
+ SET_SWD,
+};
+
+/* Calculate the number of sectors with data (swd) and perform action 'a' */
+static uint64_t zbd_process_swd(const struct fio_file *f, enum swd_action a)
{
-#if 0
struct fio_zone_info *zb, *ze, *z;
- uint64_t swd;
+ uint64_t swd = 0;
zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
ze = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset +
f->io_size)];
- swd = 0;
for (z = zb; z < ze; z++) {
pthread_mutex_lock(&z->mutex);
swd += z->wp - z->start;
}
pthread_mutex_lock(&f->zbd_info->mutex);
- assert(f->zbd_info->sectors_with_data == swd);
+ switch (a) {
+ case CHECK_SWD:
+ assert(f->zbd_info->sectors_with_data == swd);
+ break;
+ case SET_SWD:
+ f->zbd_info->sectors_with_data = swd;
+ break;
+ }
pthread_mutex_unlock(&f->zbd_info->mutex);
for (z = zb; z < ze; z++)
pthread_mutex_unlock(&z->mutex);
-#endif
+
+ return swd;
+}
+
+/*
+ * The swd check is useful for debugging but takes too much time to leave
+ * it enabled all the time. Hence it is disabled by default.
+ */
+static const bool enable_check_swd = false;
+
+/* Check whether the value of zbd_info.sectors_with_data is correct. */
+static void zbd_check_swd(const struct fio_file *f)
+{
+ if (!enable_check_swd)
+ return;
+
+ zbd_process_swd(f, CHECK_SWD);
+}
+
+static void zbd_init_swd(struct fio_file *f)
+{
+ uint64_t swd;
+
+ swd = zbd_process_swd(f, SET_SWD);
+ dprint(FD_ZBD, "%s(%s): swd = %" PRIu64 "\n", __func__, f->file_name,
+ swd);
}
void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
- struct fio_zone_info *zb, *ze, *z;
+ struct fio_zone_info *zb, *ze;
uint32_t zone_idx_e;
- uint64_t swd = 0;
if (!f->zbd_info)
return;
zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size);
ze = &f->zbd_info->zone_info[zone_idx_e];
- for (z = zb ; z < ze; z++) {
- pthread_mutex_lock(&z->mutex);
- swd += z->wp - z->start;
- }
- pthread_mutex_lock(&f->zbd_info->mutex);
- f->zbd_info->sectors_with_data = swd;
- pthread_mutex_unlock(&f->zbd_info->mutex);
- for (z = zb ; z < ze; z++)
- pthread_mutex_unlock(&z->mutex);
- dprint(FD_ZBD, "%s(%s): swd = %ld\n", __func__, f->file_name, swd);
+ zbd_init_swd(f);
/*
* If data verification is enabled reset the affected zones before
* writing any data to avoid that a zone reset has to be issued while
* a multiple of the fio block size. The caller must neither hold z->mutex
* nor f->zbd_info->mutex. Returns with z->mutex held upon success.
*/
-struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
- struct io_u *io_u)
+static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
+ struct io_u *io_u)
{
const uint32_t min_bs = td->o.min_bs[io_u->ddir];
const struct fio_file *f = io_u->file;
/* Both z->mutex and f->zbd_info->mutex are held. */
examine_zone:
- if ((z->wp << 9) + min_bs <= ((z+1)->start << 9)) {
+ if (z->wp + min_bs <= (z+1)->start) {
pthread_mutex_unlock(&f->zbd_info->mutex);
goto out;
}
zone_idx++;
pthread_mutex_unlock(&z->mutex);
z++;
- if (!is_valid_offset(f, z->start << 9)) {
+ if (!is_valid_offset(f, z->start)) {
/* Wrap-around. */
zone_idx = zbd_zone_idx(f, f->file_offset);
z = &f->zbd_info->zone_info[zone_idx];
}
- assert(is_valid_offset(f, z->start << 9));
+ assert(is_valid_offset(f, z->start));
pthread_mutex_lock(&z->mutex);
if (z->open)
continue;
z = &f->zbd_info->zone_info[zone_idx];
pthread_mutex_lock(&z->mutex);
- if ((z->wp << 9) + min_bs <= ((z+1)->start << 9))
+ if (z->wp + min_bs <= (z+1)->start)
goto out;
pthread_mutex_lock(&f->zbd_info->mutex);
}
out:
dprint(FD_ZBD, "%s(%s): returning zone %d\n", __func__, f->file_name,
zone_idx);
- io_u->offset = z->start << 9;
+ io_u->offset = z->start;
return z;
}
}
if (z->verify_block * min_bs >= f->zbd_info->zone_size)
- log_err("%s: %d * %d >= %ld\n", f->file_name, z->verify_block,
- min_bs, f->zbd_info->zone_size);
- io_u->offset = (z->start << 9) + z->verify_block++ * min_bs;
+ log_err("%s: %d * %d >= %llu\n", f->file_name, z->verify_block,
+ min_bs, (unsigned long long) f->zbd_info->zone_size);
+ io_u->offset = z->start + z->verify_block++ * min_bs;
return z;
}
for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
if (z1 < zl && z1->cond != BLK_ZONE_COND_OFFLINE) {
pthread_mutex_lock(&z1->mutex);
- if (z1->start + (min_bs >> 9) <= z1->wp)
+ if (z1->start + min_bs <= z1->wp)
return z1;
pthread_mutex_unlock(&z1->mutex);
} else if (!td_random(td)) {
if (td_random(td) && z2 >= zf &&
z2->cond != BLK_ZONE_COND_OFFLINE) {
pthread_mutex_lock(&z2->mutex);
- if (z2->start + (min_bs >> 9) <= z2->wp)
+ if (z2->start + min_bs <= z2->wp)
return z2;
pthread_mutex_unlock(&z2->mutex);
}
return NULL;
}
-
/**
- * zbd_post_submit - update the write pointer and unlock the zone lock
+ * zbd_queue_io - update the write pointer of a sequential zone
* @io_u: I/O unit
- * @success: Whether or not the I/O unit has been executed successfully
+ * @success: Whether or not the I/O unit has been queued successfully
+ * @q: queueing status (busy, completed or queued).
*
- * For write and trim operations, update the write pointer of all affected
- * zones.
+ * For write and trim operations, update the write pointer of the I/O unit
+ * target zone.
*/
-static void zbd_post_submit(const struct io_u *io_u, bool success)
+static void zbd_queue_io(struct io_u *io_u, int q, bool success)
{
- struct zoned_block_device_info *zbd_info;
+ const struct fio_file *f = io_u->file;
+ struct zoned_block_device_info *zbd_info = f->zbd_info;
struct fio_zone_info *z;
uint32_t zone_idx;
- uint64_t end, zone_end;
+ uint64_t zone_end;
- zbd_info = io_u->file->zbd_info;
if (!zbd_info)
return;
- zone_idx = zbd_zone_idx(io_u->file, io_u->offset);
- end = (io_u->offset + io_u->buflen) >> 9;
- z = &zbd_info->zone_info[zone_idx];
+ zone_idx = zbd_zone_idx(f, io_u->offset);
assert(zone_idx < zbd_info->nr_zones);
+ z = &zbd_info->zone_info[zone_idx];
+
if (z->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return;
+
if (!success)
goto unlock;
+
+ dprint(FD_ZBD,
+ "%s: queued I/O (%lld, %llu) for zone %u\n",
+ f->file_name, io_u->offset, io_u->buflen, zone_idx);
+
switch (io_u->ddir) {
case DDIR_WRITE:
- zone_end = min(end, (z + 1)->start);
+ zone_end = min((uint64_t)(io_u->offset + io_u->buflen),
+ (z + 1)->start);
pthread_mutex_lock(&zbd_info->mutex);
/*
* z->wp > zone_end means that one or more I/O errors
default:
break;
}
+
unlock:
- pthread_mutex_unlock(&z->mutex);
+ if (!success || q != FIO_Q_QUEUED) {
+ /* BUSY or COMPLETED: unlock the zone */
+ pthread_mutex_unlock(&z->mutex);
+ io_u->zbd_put_io = NULL;
+ }
+}
+
+/**
+ * zbd_put_io - Unlock an I/O unit target zone lock
+ * @io_u: I/O unit
+ */
+static void zbd_put_io(const struct io_u *io_u)
+{
+ const struct fio_file *f = io_u->file;
+ struct zoned_block_device_info *zbd_info = f->zbd_info;
+ struct fio_zone_info *z;
+ uint32_t zone_idx;
+
+ if (!zbd_info)
+ return;
+
+ zone_idx = zbd_zone_idx(f, io_u->offset);
+ assert(zone_idx < zbd_info->nr_zones);
+ z = &zbd_info->zone_info[zone_idx];
+
+ if (z->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
+ return;
+
+ dprint(FD_ZBD,
+ "%s: terminate I/O (%lld, %llu) for zone %u\n",
+ f->file_name, io_u->offset, io_u->buflen, zone_idx);
+
+ pthread_mutex_unlock(&z->mutex);
+ zbd_check_swd(f);
}
bool zbd_unaligned_write(int error_code)
{
const struct fio_file *f = io_u->file;
uint32_t zone_idx_b;
- struct fio_zone_info *zb, *zl;
+ struct fio_zone_info *zb, *zl, *orig_zb;
uint32_t orig_len = io_u->buflen;
uint32_t min_bs = td->o.min_bs[io_u->ddir];
uint64_t new_len;
assert(io_u->buflen);
zone_idx_b = zbd_zone_idx(f, io_u->offset);
zb = &f->zbd_info->zone_info[zone_idx_b];
+ orig_zb = zb;
/* Accept the I/O offset for conventional zones. */
if (zb->type == BLK_ZONE_TYPE_CONVENTIONAL)
io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
return io_u_accept;
- pthread_mutex_lock(&zb->mutex);
+ zbd_check_swd(f);
+
+ /*
+ * Lock the io_u target zone. The zone will be unlocked if io_u offset
+ * is changed or when io_u completes and zbd_put_io() executed.
+ * To avoid multiple jobs doing asynchronous I/Os from deadlocking each
+ * other waiting for zone locks when building an io_u batch, first
+ * only trylock the zone. If the zone is already locked by another job,
+ * process the currently queued I/Os so that I/O progress is made and
+ * zones unlocked.
+ */
+ if (pthread_mutex_trylock(&zb->mutex) != 0) {
+ if (!td_ioengine_flagged(td, FIO_SYNCIO))
+ io_u_quiesce(td);
+ pthread_mutex_lock(&zb->mutex);
+ }
+
switch (io_u->ddir) {
case DDIR_READ:
if (td->runstate == TD_VERIFYING) {
goto accept;
}
/*
- * Avoid reads past the write pointer because such reads do not
- * hit the medium.
+ * Check that there is enough written data in the zone to do an
+ * I/O of at least min_bs B. If there isn't, find a new zone for
+ * the I/O.
*/
range = zb->cond != BLK_ZONE_COND_OFFLINE ?
- ((zb->wp - zb->start) << 9) - io_u->buflen : 0;
- if (td_random(td) && range >= 0) {
- io_u->offset = (zb->start << 9) +
- ((io_u->offset - (zb->start << 9)) %
- (range + 1)) / min_bs * min_bs;
- assert(zb->start << 9 <= io_u->offset);
- assert(io_u->offset + io_u->buflen <= zb->wp << 9);
- goto accept;
- }
- if (zb->cond == BLK_ZONE_COND_OFFLINE ||
- (io_u->offset + io_u->buflen) >> 9 > zb->wp) {
+ zb->wp - zb->start : 0;
+ if (range < min_bs ||
+ ((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
pthread_mutex_unlock(&zb->mutex);
zl = &f->zbd_info->zone_info[zbd_zone_idx(f,
f->file_offset + f->io_size)];
io_u->buflen);
goto eof;
}
- io_u->offset = zb->start << 9;
+ /*
+ * zbd_find_zone() returned a zone with a range of at
+ * least min_bs.
+ */
+ range = zb->wp - zb->start;
+ assert(range >= min_bs);
+
+ if (!td_random(td))
+ io_u->offset = zb->start;
}
- if ((io_u->offset + io_u->buflen) >> 9 > zb->wp) {
- dprint(FD_ZBD, "%s: %lld + %lld > %" PRIu64 "\n",
- f->file_name, io_u->offset, io_u->buflen,
- zb->wp);
- goto eof;
+ /*
+ * Make sure the I/O is within the zone valid data range while
+ * maximizing the I/O size and preserving randomness.
+ */
+ if (range <= io_u->buflen)
+ io_u->offset = zb->start;
+ else if (td_random(td))
+ io_u->offset = zb->start +
+ ((io_u->offset - orig_zb->start) %
+ (range - io_u->buflen)) / min_bs * min_bs;
+ /*
+ * Make sure the I/O does not cross over the zone wp position.
+ */
+ new_len = min((unsigned long long)io_u->buflen,
+ (unsigned long long)(zb->wp - io_u->offset));
+ new_len = new_len / min_bs * min_bs;
+ if (new_len < io_u->buflen) {
+ io_u->buflen = new_len;
+ dprint(FD_IO, "Changed length from %u into %llu\n",
+ orig_len, io_u->buflen);
}
+ assert(zb->start <= io_u->offset);
+ assert(io_u->offset + io_u->buflen <= zb->wp);
goto accept;
case DDIR_WRITE:
- if (io_u->buflen > (f->zbd_info->zone_size << 9))
+ if (io_u->buflen > f->zbd_info->zone_size)
goto eof;
if (!zbd_open_zone(td, io_u, zone_idx_b)) {
pthread_mutex_unlock(&zb->mutex);
}
/* Check whether the zone reset threshold has been exceeded */
if (td->o.zrf.u.f) {
- check_swd(td, f);
- if ((f->zbd_info->sectors_with_data << 9) >=
+ if (f->zbd_info->sectors_with_data >=
f->io_size * td->o.zrt.u.f &&
zbd_dec_and_reset_write_cnt(td, f)) {
zb->reset_zone = 1;
zb->reset_zone = 0;
if (zbd_reset_zone(td, f, zb) < 0)
goto eof;
- check_swd(td, f);
}
/* Make writes occur at the write pointer */
assert(!zbd_zone_full(f, zb, min_bs));
- io_u->offset = zb->wp << 9;
+ io_u->offset = zb->wp;
if (!is_valid_offset(f, io_u->offset)) {
dprint(FD_ZBD, "Dropped request with offset %llu\n",
io_u->offset);
* small.
*/
new_len = min((unsigned long long)io_u->buflen,
- ((zb + 1)->start << 9) - io_u->offset);
+ (zb + 1)->start - io_u->offset);
new_len = new_len / min_bs * min_bs;
if (new_len == io_u->buflen)
goto accept;
goto accept;
}
log_err("Zone remainder %lld smaller than minimum block size %d\n",
- (((zb + 1)->start << 9) - io_u->offset),
+ ((zb + 1)->start - io_u->offset),
min_bs);
goto eof;
case DDIR_TRIM:
accept:
assert(zb);
assert(zb->cond != BLK_ZONE_COND_OFFLINE);
- assert(!io_u->post_submit);
- io_u->post_submit = zbd_post_submit;
+ assert(!io_u->zbd_queue_io);
+ assert(!io_u->zbd_put_io);
+ io_u->zbd_queue_io = zbd_queue_io;
+ io_u->zbd_put_io = zbd_put_io;
return io_u_accept;
eof:
{
char *res;
- if (asprintf(&res, "; %ld zone resets", ts->nr_zone_resets) < 0)
+ if (asprintf(&res, "; %llu zone resets", (unsigned long long) ts->nr_zone_resets) < 0)
return NULL;
return res;
}