/*
 * Copyright (C) 2018 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/blkzoned.h>
#include "file.h"
#include "fio.h"
#include "lib/pow2.h"
#include "log.h"
#include "smalloc.h"
#include "verify.h"
#include "zbd.h"

/**
 * zbd_zone_idx - convert an offset into a zone number
 * @f: file pointer.
 * @offset: offset in bytes. If this offset is in the first zone_size bytes
 *	    past the disk size then the index of the sentinel is returned.
 */
static uint32_t zbd_zone_idx(const struct fio_file *f, uint64_t offset)
{
	uint32_t zone_idx;

	if (f->zbd_info->zone_size_log2 > 0)
		zone_idx = offset >> f->zbd_info->zone_size_log2;
	else
		zone_idx = offset / f->zbd_info->zone_size;

	return min(zone_idx, f->zbd_info->nr_zones);
}
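
/*
 * Example: with a 256 MiB zone size (zone_size_log2 == 28), an offset of
 * 768 MiB maps to zone index 3, since 768 MiB >> 28 == 3. Offsets past the
 * end of the disk map to the sentinel entry at index nr_zones.
 */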
/**
 * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
 * @f: file pointer.
 * @z: zone info pointer.
 * @required: minimum number of bytes that must remain in a zone.
 *
 * The caller must hold z->mutex.
 */
static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
			  uint64_t required)
{
	assert((required & 511) == 0);

	return z->type == BLK_ZONE_TYPE_SEQWRITE_REQ &&
		z->wp + required > z->start + f->zbd_info->zone_size;
}

static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
{
	return (uint64_t)(offset - f->file_offset) < f->io_size;
}
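
/*
 * Note that is_valid_offset() relies on unsigned arithmetic: an offset below
 * f->file_offset wraps around to a huge uint64_t value and fails the '<'
 * comparison, so a single compare validates both range bounds.
 */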

/* Verify whether direct I/O is used for all host-managed zoned drives. */
static bool zbd_using_direct_io(void)
{
	struct thread_data *td;
	struct fio_file *f;
	int i, j;

	for_each_td(td, i) {
		if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
			continue;
		for_each_file(td, f, j) {
			if (f->zbd_info &&
			    f->zbd_info->model == ZBD_DM_HOST_MANAGED)
				return false;
		}
	}

	return true;
}

/* Whether or not the I/O range for f includes one or more sequential zones */
static bool zbd_is_seq_job(struct fio_file *f)
{
	uint32_t zone_idx, zone_idx_b, zone_idx_e;

	assert(f->zbd_info);
	if (f->io_size == 0)
		return false;
	zone_idx_b = zbd_zone_idx(f, f->file_offset);
	zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size - 1);
	for (zone_idx = zone_idx_b; zone_idx <= zone_idx_e; zone_idx++)
		if (f->zbd_info->zone_info[zone_idx].type ==
		    BLK_ZONE_TYPE_SEQWRITE_REQ)
			return true;

	return false;
}

/*
 * Verify whether offset and size parameters are aligned with zone boundaries.
 * If they are not, round the offset up and the size down to the nearest zone
 * boundaries.
 */
static bool zbd_verify_sizes(void)
{
	const struct fio_zone_info *z;
	struct thread_data *td;
	struct fio_file *f;
	uint64_t new_offset, new_end;
	uint32_t zone_idx;
	int i, j;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			if (!f->zbd_info)
				continue;
			if (f->file_offset >= f->real_file_size)
				continue;
			if (!zbd_is_seq_job(f))
				continue;
			zone_idx = zbd_zone_idx(f, f->file_offset);
			z = &f->zbd_info->zone_info[zone_idx];
			if (f->file_offset != z->start) {
				new_offset = (z+1)->start;
				if (new_offset >= f->file_offset + f->io_size) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded up offset from %llu to %llu\n",
					 f->file_name,
					 (unsigned long long) f->file_offset,
					 (unsigned long long) new_offset);
				f->io_size -= (new_offset - f->file_offset);
				f->file_offset = new_offset;
			}
			zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
			z = &f->zbd_info->zone_info[zone_idx];
			new_end = z->start;
			if (f->file_offset + f->io_size != new_end) {
				if (new_end <= f->file_offset) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded down io_size from %llu to %llu\n",
					 f->file_name,
					 (unsigned long long) f->io_size,
					 (unsigned long long)(new_end - f->file_offset));
				f->io_size = new_end - f->file_offset;
			}
		}
	}

	return true;
}
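
/*
 * Example: with 256 MiB zones, offset=100M and size=1G are adjusted to
 * offset=256M (rounded up to the next zone start) and io_size=768M (rounded
 * down so that offset + io_size ends on a zone boundary).
 */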

static bool zbd_verify_bs(void)
{
	struct thread_data *td;
	struct fio_file *f;
	uint32_t zone_size;
	int i, j, k;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			if (!f->zbd_info)
				continue;
			zone_size = f->zbd_info->zone_size;
			for (k = 0; k < ARRAY_SIZE(td->o.bs); k++) {
				if (td->o.verify != VERIFY_NONE &&
				    zone_size % td->o.bs[k] != 0) {
					log_info("%s: block size %llu is not a divisor of the zone size %d\n",
						 f->file_name, td->o.bs[k],
						 zone_size);
					return false;
				}
			}
		}
	}
	return true;
}

/*
 * Read zone information into @buf starting from sector @start_sector.
 * @fd is a file descriptor that refers to a block device and @bufsz is the
 * size of @buf.
 *
 * Returns 0 upon success and a negative error code upon failure.
 * If the zone report is empty, always assume an error (device problem) and
 * return -EIO.
 */
static int read_zone_info(int fd, uint64_t start_sector,
			  void *buf, unsigned int bufsz)
{
	struct blk_zone_report *hdr = buf;
	int ret;

	if (bufsz < sizeof(*hdr))
		return -EINVAL;

	memset(hdr, 0, sizeof(*hdr));

	hdr->nr_zones = (bufsz - sizeof(*hdr)) / sizeof(struct blk_zone);
	hdr->sector = start_sector;
	ret = ioctl(fd, BLKREPORTZONE, hdr);
	if (ret)
		return -errno;
	if (!hdr->nr_zones)
		return -EIO;
	return 0;
}
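
/*
 * Note: on return from BLKREPORTZONE the kernel has updated hdr->nr_zones to
 * the number of zone descriptors it actually filled in, which may be smaller
 * than the number requested; callers iterate over the report accordingly.
 */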

/*
 * Read up to 255 characters from the first line of a file. Strip the trailing
 * newline.
 */
static char *read_file(const char *path)
{
	char line[256], *p = line;
	FILE *f;

	f = fopen(path, "rb");
	if (!f)
		return NULL;
	if (!fgets(line, sizeof(line), f))
		line[0] = '\0';
	strsep(&p, "\n");
	fclose(f);

	return strdup(line);
}

static enum blk_zoned_model get_zbd_model(const char *file_name)
{
	enum blk_zoned_model model = ZBD_DM_NONE;
	char *zoned_attr_path = NULL;
	char *model_str = NULL;
	struct stat statbuf;
	char *sys_devno_path = NULL;
	char *part_attr_path = NULL;
	char *part_str = NULL;
	char sys_path[PATH_MAX];
	ssize_t sz;
	char *delim = NULL;

	if (stat(file_name, &statbuf) < 0)
		goto out;
	if (asprintf(&sys_devno_path, "/sys/dev/block/%d:%d",
		     major(statbuf.st_rdev), minor(statbuf.st_rdev)) < 0)
		goto out;

	sz = readlink(sys_devno_path, sys_path, sizeof(sys_path) - 1);
	if (sz < 0)
		goto out;
	sys_path[sz] = '\0';

	/*
	 * If the device is a partition device, cut the device name in the
	 * canonical sysfs path to obtain the sysfs path of the holder device.
	 * e.g.: /sys/devices/.../sda/sda1 -> /sys/devices/.../sda
	 */
	if (asprintf(&part_attr_path, "/sys/dev/block/%s/partition",
		     sys_path) < 0)
		goto out;
	part_str = read_file(part_attr_path);
	if (part_str && *part_str == '1') {
		delim = strrchr(sys_path, '/');
		if (!delim)
			goto out;
		*delim = '\0';
	}

	if (asprintf(&zoned_attr_path,
		     "/sys/dev/block/%s/queue/zoned", sys_path) < 0)
		goto out;

	model_str = read_file(zoned_attr_path);
	if (!model_str)
		goto out;
	dprint(FD_ZBD, "%s: zbd model string: %s\n", file_name, model_str);
	if (strcmp(model_str, "host-aware") == 0)
		model = ZBD_DM_HOST_AWARE;
	else if (strcmp(model_str, "host-managed") == 0)
		model = ZBD_DM_HOST_MANAGED;

out:
	free(model_str);
	free(zoned_attr_path);
	free(part_str);
	free(part_attr_path);
	free(sys_devno_path);
	return model;
}
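
/*
 * Example: for /dev/sda1 the devno symlink resolves to a sysfs path ending in
 * .../sda/sda1, the "partition" attribute reads 1, so the last path component
 * is cut and the zone model is read from the holder device's queue/zoned
 * attribute ("none", "host-aware" or "host-managed").
 */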

static int ilog2(uint64_t i)
{
	int log = -1;

	while (i) {
		i >>= 1;
		log++;
	}
	return log;
}

/*
 * Initialize f->zbd_info for devices that are not zoned block devices. This
 * allows to execute a ZBD workload against a non-ZBD device.
 */
static int init_zone_info(struct thread_data *td, struct fio_file *f)
{
	uint32_t nr_zones;
	struct fio_zone_info *p;
	uint64_t zone_size;
	struct zoned_block_device_info *zbd_info = NULL;
	pthread_mutexattr_t attr;
	int i;

	zone_size = td->o.zone_size;
	assert(zone_size);
	nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	if (!zbd_info)
		return -ENOMEM;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setpshared(&attr, true);
	pthread_mutex_init(&zbd_info->mutex, &attr);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (i = 0; i < nr_zones; i++, p++) {
		pthread_mutex_init(&p->mutex, &attr);
		p->start = i * zone_size;
		p->wp = p->start + zone_size;
		p->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		p->cond = BLK_ZONE_COND_EMPTY;
	}
	/* a sentinel */
	p->start = nr_zones * zone_size;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) : -1;
	f->zbd_info->nr_zones = nr_zones;
	pthread_mutexattr_destroy(&attr);
	return 0;
}
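
/*
 * Example: emulating zones on a 1 GiB regular block device with
 * --zonesize=64m yields nr_zones = 16 sequential zones, plus one sentinel
 * entry whose start marks the end of the last zone.
 */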

/*
 * Parse the BLKREPORTZONE output and store it in f->zbd_info. Must be called
 * only for devices that support this ioctl, namely zoned block devices.
 */
static int parse_zone_info(struct thread_data *td, struct fio_file *f)
{
	const unsigned int bufsz = sizeof(struct blk_zone_report) +
		4096 * sizeof(struct blk_zone);
	uint32_t nr_zones;
	struct blk_zone_report *hdr;
	const struct blk_zone *z;
	struct fio_zone_info *p;
	uint64_t zone_size, start_sector;
	struct zoned_block_device_info *zbd_info = NULL;
	pthread_mutexattr_t attr;
	void *buf;
	int fd, i, j, ret = 0;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setpshared(&attr, true);

	buf = malloc(bufsz);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
	if (fd < 0) {
		ret = -errno;
		goto free;
	}

	ret = read_zone_info(fd, 0, buf, bufsz);
	if (ret < 0) {
		log_info("fio: BLKREPORTZONE(%lu) failed for %s (%d).\n",
			 0UL, f->file_name, -ret);
		goto close;
	}
	hdr = buf;
	if (hdr->nr_zones < 1) {
		log_info("fio: %s has invalid zone information.\n",
			 f->file_name);
		ret = -EINVAL;
		goto close;
	}
	z = (void *)(hdr + 1);
	zone_size = z->len << 9;
	nr_zones = (f->real_file_size + zone_size - 1) / zone_size;

	if (td->o.zone_size == 0) {
		td->o.zone_size = zone_size;
	} else if (td->o.zone_size != zone_size) {
		log_info("fio: %s job parameter zonesize %llu does not match disk zone size %llu.\n",
			 f->file_name, (unsigned long long) td->o.zone_size,
			 (unsigned long long) zone_size);
		ret = -EINVAL;
		goto close;
	}

	dprint(FD_ZBD, "Device %s has %d zones of size %llu KB\n", f->file_name,
	       nr_zones, (unsigned long long) zone_size / 1024);

	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	ret = -ENOMEM;
	if (!zbd_info)
		goto close;
	pthread_mutex_init(&zbd_info->mutex, &attr);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (start_sector = 0, j = 0; j < nr_zones;) {
		z = (void *)(hdr + 1);
		for (i = 0; i < hdr->nr_zones; i++, j++, z++, p++) {
			pthread_mutex_init(&p->mutex, &attr);
			p->start = z->start << 9;
			switch (z->cond) {
			case BLK_ZONE_COND_NOT_WP:
			case BLK_ZONE_COND_FULL:
				p->wp = p->start + zone_size;
				break;
			default:
				assert(z->start <= z->wp);
				assert(z->wp <= z->start + (zone_size >> 9));
				p->wp = z->wp << 9;
				break;
			}
			p->type = z->type;
			p->cond = z->cond;
			if (j > 0 && p->start != p[-1].start + zone_size) {
				log_info("%s: invalid zone data\n",
					 f->file_name);
				ret = -EINVAL;
				goto close;
			}
		}
		z--;
		start_sector = z->start + z->len;
		if (j >= nr_zones)
			break;
		ret = read_zone_info(fd, start_sector, buf, bufsz);
		if (ret < 0) {
			log_info("fio: BLKREPORTZONE(%llu) failed for %s (%d).\n",
				 (unsigned long long) start_sector,
				 f->file_name, -ret);
			goto close;
		}
	}
	/* a sentinel */
	zbd_info->zone_info[nr_zones].start = start_sector << 9;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) : -1;
	f->zbd_info->nr_zones = nr_zones;
	zbd_info = NULL;
	ret = 0;

close:
	sfree(zbd_info);
	close(fd);
free:
	free(buf);
out:
	pthread_mutexattr_destroy(&attr);
	return ret;
}
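
/*
 * Note: the report buffer holds at most 4096 struct blk_zone entries, so
 * devices with more zones are parsed with repeated BLKREPORTZONE calls, each
 * continuing at the sector where the previous report ended.
 */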

/*
 * Allocate zone information and store it into f->zbd_info if zonemode=zbd.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
{
	enum blk_zoned_model zbd_model;
	int ret = 0;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);

	zbd_model = get_zbd_model(f->file_name);
	switch (zbd_model) {
	case ZBD_DM_HOST_AWARE:
	case ZBD_DM_HOST_MANAGED:
		ret = parse_zone_info(td, f);
		break;
	case ZBD_DM_NONE:
		ret = init_zone_info(td, f);
		break;
	}
	if (ret == 0)
		f->zbd_info->model = zbd_model;
	return ret;
}

void zbd_free_zone_info(struct fio_file *f)
{
	uint32_t refcount;

	if (!f->zbd_info)
		return;

	pthread_mutex_lock(&f->zbd_info->mutex);
	refcount = --f->zbd_info->refcount;
	pthread_mutex_unlock(&f->zbd_info->mutex);

	assert((int32_t)refcount >= 0);
	if (refcount == 0)
		sfree(f->zbd_info);
	f->zbd_info = NULL;
}

/*
 * Initialize f->zbd_info.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * Note: this function can only work correctly if it is called before the first
 * fio fork() call.
 */
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
	struct thread_data *td2;
	struct fio_file *f2;
	int i, j, ret;

	for_each_td(td2, i) {
		for_each_file(td2, f2, j) {
			if (td2 == td && f2 == file)
				continue;
			if (!f2->zbd_info ||
			    strcmp(f2->file_name, file->file_name) != 0)
				continue;
			file->zbd_info = f2->zbd_info;
			file->zbd_info->refcount++;
			return 0;
		}
	}

	ret = zbd_create_zone_info(td, file);
	if (ret < 0)
		td_verror(td, -ret, "BLKREPORTZONE failed");
	return ret;
}

int zbd_init(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	for_each_file(td, f, i) {
		if (f->filetype != FIO_TYPE_BLOCK)
			continue;
		if (td->o.zone_size && td->o.zone_size < 512) {
			log_err("%s: zone size must be at least 512 bytes for --zonemode=zbd\n\n",
				f->file_name);
			return 1;
		}
		if (td->o.zone_size == 0 &&
		    get_zbd_model(f->file_name) == ZBD_DM_NONE) {
			log_err("%s: Specifying the zone size is mandatory for regular block devices with --zonemode=zbd\n\n",
				f->file_name);
			return 1;
		}
		zbd_init_zone_info(td, f);
	}

	if (!zbd_using_direct_io()) {
		log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
		return 1;
	}

	if (!zbd_verify_sizes())
		return 1;

	if (!zbd_verify_bs())
		return 1;

	return 0;
}
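
/*
 * A minimal job that exercises this init path could look as follows
 * (illustrative sketch only; any regular job options may be added):
 *
 *	[global]
 *	zonemode=zbd
 *	direct=1
 *	zonesize=256m	; only required for non-zoned block devices
 *
 *	[job1]
 *	filename=/dev/sdb
 *	rw=write
 */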

/**
 * zbd_reset_range - reset zones for a range of bytes
 * @td: FIO thread data.
 * @f: Fio file for which to reset zones
 * @offset: Starting offset in bytes
 * @length: Length in bytes
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_range(struct thread_data *td, const struct fio_file *f,
			   uint64_t offset, uint64_t length)
{
	struct blk_zone_range zr = {
		.sector = offset >> 9,
		.nr_sectors = length >> 9,
	};
	uint32_t zone_idx_b, zone_idx_e;
	struct fio_zone_info *zb, *ze, *z;
	int ret = 0;

	assert(f->fd != -1);
	assert(is_valid_offset(f, offset + length - 1));
	switch (f->zbd_info->model) {
	case ZBD_DM_HOST_AWARE:
	case ZBD_DM_HOST_MANAGED:
		ret = ioctl(f->fd, BLKRESETZONE, &zr);
		if (ret < 0) {
			td_verror(td, errno, "resetting wp failed");
			log_err("%s: resetting wp for %llu sectors at sector %llu failed (%d).\n",
				f->file_name, zr.nr_sectors, zr.sector, errno);
			return ret;
		}
		break;
	case ZBD_DM_NONE:
		break;
	}

	zone_idx_b = zbd_zone_idx(f, offset);
	zb = &f->zbd_info->zone_info[zone_idx_b];
	zone_idx_e = zbd_zone_idx(f, offset + length);
	ze = &f->zbd_info->zone_info[zone_idx_e];
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		pthread_mutex_lock(&f->zbd_info->mutex);
		f->zbd_info->sectors_with_data -= z->wp - z->start;
		pthread_mutex_unlock(&f->zbd_info->mutex);
		z->wp = z->start;
		z->verify_block = 0;
		pthread_mutex_unlock(&z->mutex);
	}

	td->ts.nr_zone_resets += ze - zb;

	return ret;
}

static unsigned int zbd_zone_nr(struct zoned_block_device_info *zbd_info,
				struct fio_zone_info *zone)
{
	return zone - zbd_info->zone_info;
}

/**
 * zbd_reset_zone - reset the write pointer of a single zone
 * @td: FIO thread data.
 * @f: FIO file associated with the disk for which to reset a write pointer.
 * @z: Zone to reset.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_zone(struct thread_data *td, const struct fio_file *f,
			  struct fio_zone_info *z)
{
	dprint(FD_ZBD, "%s: resetting wp of zone %u.\n", f->file_name,
	       zbd_zone_nr(f->zbd_info, z));

	return zbd_reset_range(td, f, z->start, (z+1)->start - z->start);
}

/*
 * Reset a range of zones. Returns 0 upon success and 1 upon failure.
 * @td: fio thread data.
 * @f: fio file for which to reset zones
 * @zb: first zone to reset.
 * @ze: first zone not to reset.
 * @all_zones: whether to reset all zones or only those zones for which the
 *	write pointer is not a multiple of td->o.min_bs[DDIR_WRITE].
 */
static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
			   struct fio_zone_info *const zb,
			   struct fio_zone_info *const ze, bool all_zones)
{
	struct fio_zone_info *z, *start_z = ze;
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
	bool reset_wp;
	int res = 0;

	dprint(FD_ZBD, "%s: examining zones %u .. %u\n", f->file_name,
	       zbd_zone_nr(f->zbd_info, zb), zbd_zone_nr(f->zbd_info, ze));
	assert(f->fd != -1);
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		switch (z->type) {
		case BLK_ZONE_TYPE_SEQWRITE_REQ:
			reset_wp = all_zones ? z->wp != z->start :
					(td->o.td_ddir & TD_DDIR_WRITE) &&
					z->wp % min_bs != 0;
			if (start_z == ze && reset_wp) {
				start_z = z;
			} else if (start_z < ze && !reset_wp) {
				dprint(FD_ZBD,
				       "%s: resetting zones %u .. %u\n",
				       f->file_name,
				       zbd_zone_nr(f->zbd_info, start_z),
				       zbd_zone_nr(f->zbd_info, z));
				if (zbd_reset_range(td, f, start_z->start,
						z->start - start_z->start) < 0)
					res = 1;
				start_z = ze;
			}
			break;
		default:
			if (start_z == ze)
				break;
			dprint(FD_ZBD, "%s: resetting zones %u .. %u\n",
			       f->file_name, zbd_zone_nr(f->zbd_info, start_z),
			       zbd_zone_nr(f->zbd_info, z));
			if (zbd_reset_range(td, f, start_z->start,
					    z->start - start_z->start) < 0)
				res = 1;
			start_z = ze;
			break;
		}
	}
	if (start_z < ze) {
		dprint(FD_ZBD, "%s: resetting zones %u .. %u\n", f->file_name,
		       zbd_zone_nr(f->zbd_info, start_z),
		       zbd_zone_nr(f->zbd_info, z));
		if (zbd_reset_range(td, f, start_z->start,
				    z->start - start_z->start) < 0)
			res = 1;
	}
	for (z = zb; z < ze; z++)
		pthread_mutex_unlock(&z->mutex);

	return res;
}

/*
 * Reset zbd_info.write_cnt, the counter that counts down towards the next
 * zone reset.
 */
static void zbd_reset_write_cnt(const struct thread_data *td,
				const struct fio_file *f)
{
	assert(0 <= td->o.zrf.u.f && td->o.zrf.u.f <= 1);

	pthread_mutex_lock(&f->zbd_info->mutex);
	f->zbd_info->write_cnt = td->o.zrf.u.f ?
		min(1.0 / td->o.zrf.u.f, 0.0 + UINT_MAX) : UINT_MAX;
	pthread_mutex_unlock(&f->zbd_info->mutex);
}

static bool zbd_dec_and_reset_write_cnt(const struct thread_data *td,
					const struct fio_file *f)
{
	uint32_t write_cnt = 0;

	pthread_mutex_lock(&f->zbd_info->mutex);
	assert(f->zbd_info->write_cnt);
	if (f->zbd_info->write_cnt)
		write_cnt = --f->zbd_info->write_cnt;
	if (write_cnt == 0)
		zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);

	return write_cnt == 0;
}
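
/*
 * Example: with zone_reset_frequency=0.01 the counter starts at
 * 1.0 / 0.01 = 100, so zbd_dec_and_reset_write_cnt() returns true (and
 * re-arms the counter) once every 100 writes.
 */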

enum swd_action {
	CHECK_SWD,
	SET_SWD,
};

/* Calculate the number of sectors with data (swd) and perform action 'a' */
static uint64_t zbd_process_swd(const struct fio_file *f, enum swd_action a)
{
	struct fio_zone_info *zb, *ze, *z;
	uint64_t swd = 0;

	zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
	ze = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset +
						  f->io_size)];
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		swd += z->wp - z->start;
	}
	pthread_mutex_lock(&f->zbd_info->mutex);
	switch (a) {
	case CHECK_SWD:
		assert(f->zbd_info->sectors_with_data == swd);
		break;
	case SET_SWD:
		f->zbd_info->sectors_with_data = swd;
		break;
	}
	pthread_mutex_unlock(&f->zbd_info->mutex);
	for (z = zb; z < ze; z++)
		pthread_mutex_unlock(&z->mutex);

	return swd;
}

/*
 * The swd check is useful for debugging but takes too much time to leave
 * it enabled all the time. Hence it is disabled by default.
 */
static const bool enable_check_swd = false;

/* Check whether the value of zbd_info.sectors_with_data is correct. */
static void zbd_check_swd(const struct fio_file *f)
{
	if (!enable_check_swd)
		return;

	zbd_process_swd(f, CHECK_SWD);
}

static void zbd_init_swd(struct fio_file *f)
{
	uint64_t swd;

	swd = zbd_process_swd(f, SET_SWD);
	dprint(FD_ZBD, "%s(%s): swd = %" PRIu64 "\n", __func__, f->file_name,
	       swd);
}

void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
	struct fio_zone_info *zb, *ze;
	uint32_t zone_idx_e;

	if (!f->zbd_info)
		return;

	zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
	zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size);
	ze = &f->zbd_info->zone_info[zone_idx_e];
	zbd_init_swd(f);
	/*
	 * If data verification is enabled reset the affected zones before
	 * writing any data to avoid that a zone reset has to be issued while
	 * writing data, which causes data loss.
	 */
	zbd_reset_zones(td, f, zb, ze, td->o.verify != VERIFY_NONE &&
			(td->o.td_ddir & TD_DDIR_WRITE) &&
			td->runstate != TD_VERIFYING);
	zbd_reset_write_cnt(td, f);
}

/* The caller must hold f->zbd_info->mutex. */
static bool is_zone_open(const struct thread_data *td, const struct fio_file *f,
			 unsigned int zone_idx)
{
	struct zoned_block_device_info *zbdi = f->zbd_info;
	int i;

	assert(td->o.max_open_zones <= ARRAY_SIZE(zbdi->open_zones));
	assert(zbdi->num_open_zones <= td->o.max_open_zones);

	for (i = 0; i < zbdi->num_open_zones; i++)
		if (zbdi->open_zones[i] == zone_idx)
			return true;

	return false;
}

/*
 * Open a ZBD zone if it was not yet open. Returns true if either the zone was
 * already open or if opening a new zone is allowed. Returns false if the zone
 * was not yet open and opening a new zone would cause the zone limit to be
 * exceeded.
 */
static bool zbd_open_zone(struct thread_data *td, const struct io_u *io_u,
			  uint32_t zone_idx)
{
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z = &f->zbd_info->zone_info[zone_idx];
	bool res = true;

	if (z->cond == BLK_ZONE_COND_OFFLINE)
		return false;

	/*
	 * Skip full zones with data verification enabled because resetting a
	 * zone causes data loss and hence causes verification to fail.
	 */
	if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
		return false;

	/* Zero means no limit */
	if (!td->o.max_open_zones)
		return true;

	pthread_mutex_lock(&f->zbd_info->mutex);
	if (is_zone_open(td, f, zone_idx))
		goto out;
	res = false;
	if (f->zbd_info->num_open_zones >= td->o.max_open_zones)
		goto out;
	dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
	f->zbd_info->open_zones[f->zbd_info->num_open_zones++] = zone_idx;
	z->open = 1;
	res = true;

out:
	pthread_mutex_unlock(&f->zbd_info->mutex);
	return res;
}

/* The caller must hold f->zbd_info->mutex */
static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
			   unsigned int open_zone_idx)
{
	uint32_t zone_idx;

	assert(open_zone_idx < f->zbd_info->num_open_zones);
	zone_idx = f->zbd_info->open_zones[open_zone_idx];
	memmove(f->zbd_info->open_zones + open_zone_idx,
		f->zbd_info->open_zones + open_zone_idx + 1,
		(FIO_MAX_OPEN_ZBD_ZONES - (open_zone_idx + 1)) *
		sizeof(f->zbd_info->open_zones[0]));
	f->zbd_info->num_open_zones--;
	f->zbd_info->zone_info[zone_idx].open = 0;
}
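
/*
 * Note: the memmove() above keeps open_zones[] dense by shifting the tail of
 * the fixed-size array down one slot, so entries 0 .. num_open_zones - 1
 * always identify the currently open zones.
 */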

/*
 * Modify the offset of an I/O unit that does not refer to an open zone such
 * that it refers to an open zone. Close an open zone and open a new zone if
 * necessary. This algorithm can only work correctly if all write pointers are
 * a multiple of the fio block size. The caller must neither hold z->mutex
 * nor f->zbd_info->mutex. Returns with z->mutex held upon success.
 */
static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
						      struct io_u *io_u)
{
	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z;
	unsigned int open_zone_idx = -1;
	uint32_t zone_idx, new_zone_idx;
	int i;

	assert(is_valid_offset(f, io_u->offset));

	if (td->o.max_open_zones) {
		/*
		 * This statement accesses f->zbd_info->open_zones[] on purpose
		 * without locking.
		 */
		zone_idx = f->zbd_info->open_zones[(io_u->offset -
						    f->file_offset) *
				f->zbd_info->num_open_zones / f->io_size];
	} else {
		zone_idx = zbd_zone_idx(f, io_u->offset);
	}
	dprint(FD_ZBD, "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
	       __func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);

	/*
	 * Since z->mutex is the outer lock and f->zbd_info->mutex the inner
	 * lock it can happen that the state of the zone with index zone_idx
	 * has changed after 'z' has been assigned and before f->zbd_info->mutex
	 * has been obtained. Hence the loop.
	 */
	for (;;) {
		z = &f->zbd_info->zone_info[zone_idx];

		pthread_mutex_lock(&z->mutex);
		pthread_mutex_lock(&f->zbd_info->mutex);
		if (td->o.max_open_zones == 0)
			goto examine_zone;
		if (f->zbd_info->num_open_zones == 0) {
			pthread_mutex_unlock(&f->zbd_info->mutex);
			pthread_mutex_unlock(&z->mutex);
			dprint(FD_ZBD, "%s(%s): no zones are open\n",
			       __func__, f->file_name);
			return NULL;
		}
		open_zone_idx = (io_u->offset - f->file_offset) *
			f->zbd_info->num_open_zones / f->io_size;
		assert(open_zone_idx < f->zbd_info->num_open_zones);
		new_zone_idx = f->zbd_info->open_zones[open_zone_idx];
		if (new_zone_idx == zone_idx)
			break;
		zone_idx = new_zone_idx;
		pthread_mutex_unlock(&f->zbd_info->mutex);
		pthread_mutex_unlock(&z->mutex);
	}

	/* Both z->mutex and f->zbd_info->mutex are held. */

examine_zone:
	if (z->wp + min_bs <= (z+1)->start) {
		pthread_mutex_unlock(&f->zbd_info->mutex);
		goto out;
	}
	dprint(FD_ZBD, "%s(%s): closing zone %d\n", __func__, f->file_name,
	       zone_idx);
	if (td->o.max_open_zones)
		zbd_close_zone(td, f, open_zone_idx);
	pthread_mutex_unlock(&f->zbd_info->mutex);

	/* Only z->mutex is held. */

	/* Zone 'z' is full, so try to open a new zone. */
	for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
		zone_idx++;
		pthread_mutex_unlock(&z->mutex);
		z++;
		if (!is_valid_offset(f, z->start)) {
			/* Wrap-around. */
			zone_idx = zbd_zone_idx(f, f->file_offset);
			z = &f->zbd_info->zone_info[zone_idx];
		}
		assert(is_valid_offset(f, z->start));
		pthread_mutex_lock(&z->mutex);
		if (z->open)
			continue;
		if (zbd_open_zone(td, io_u, zone_idx))
			goto out;
	}

	/* Only z->mutex is held. */

	/* Check whether the write fits in any of the already opened zones. */
	pthread_mutex_lock(&f->zbd_info->mutex);
	for (i = 0; i < f->zbd_info->num_open_zones; i++) {
		zone_idx = f->zbd_info->open_zones[i];
		pthread_mutex_unlock(&f->zbd_info->mutex);
		pthread_mutex_unlock(&z->mutex);

		z = &f->zbd_info->zone_info[zone_idx];

		pthread_mutex_lock(&z->mutex);
		if (z->wp + min_bs <= (z+1)->start)
			goto out;
		pthread_mutex_lock(&f->zbd_info->mutex);
	}
	pthread_mutex_unlock(&f->zbd_info->mutex);
	pthread_mutex_unlock(&z->mutex);
	dprint(FD_ZBD, "%s(%s): did not open another zone\n", __func__,
	       f->file_name);
	return NULL;

out:
	dprint(FD_ZBD, "%s(%s): returning zone %d\n", __func__, f->file_name,
	       zone_idx);
	io_u->offset = z->start;
	return z;
}

/* The caller must hold z->mutex. */
static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
						    struct io_u *io_u,
						    struct fio_zone_info *z)
{
	const struct fio_file *f = io_u->file;
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];

	if (!zbd_open_zone(td, io_u, z - f->zbd_info->zone_info)) {
		pthread_mutex_unlock(&z->mutex);
		z = zbd_convert_to_open_zone(td, io_u);
		assert(z);
	}

	if (z->verify_block * min_bs >= f->zbd_info->zone_size)
		log_err("%s: %d * %d >= %llu\n", f->file_name, z->verify_block,
			min_bs, (unsigned long long) f->zbd_info->zone_size);
	io_u->offset = z->start + z->verify_block++ * min_bs;
	return z;
}
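
/*
 * Note: z->verify_block is cleared when a zone is reset and incremented for
 * every block handed out here, so the verify phase replays writes
 * sequentially from the start of each zone.
 */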

/*
 * Find another zone for which @io_u fits below the write pointer. Start
 * searching in zones @zb + 1 .. @zl and continue searching in zones
 * @zb - 1 .. @zf.
 *
 * Either returns NULL or returns a zone pointer and holds the mutex for that
 * zone.
 */
static struct fio_zone_info *
zbd_find_zone(struct thread_data *td, struct io_u *io_u,
	      struct fio_zone_info *zb, struct fio_zone_info *zl)
{
	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z1, *z2;
	const struct fio_zone_info *const zf =
		&f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];

	/*
	 * Skip to the next non-empty zone in case of sequential I/O and to
	 * the nearest non-empty zone in case of random I/O.
	 */
	for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
		if (z1 < zl && z1->cond != BLK_ZONE_COND_OFFLINE) {
			pthread_mutex_lock(&z1->mutex);
			if (z1->start + min_bs <= z1->wp)
				return z1;
			pthread_mutex_unlock(&z1->mutex);
		} else if (!td_random(td)) {
			break;
		}
		if (td_random(td) && z2 >= zf &&
		    z2->cond != BLK_ZONE_COND_OFFLINE) {
			pthread_mutex_lock(&z2->mutex);
			if (z2->start + min_bs <= z2->wp)
				return z2;
			pthread_mutex_unlock(&z2->mutex);
		}
	}
	dprint(FD_ZBD, "%s: adjusting random read offset failed\n",
	       f->file_name);
	return NULL;
}

/**
 * zbd_queue_io - update the write pointer of a sequential zone
 * @io_u: I/O unit
 * @success: Whether or not the I/O unit has been queued successfully
 * @q: queueing status (busy, completed or queued).
 *
 * For write and trim operations, update the write pointer of the I/O unit
 * target zone.
 */
static void zbd_queue_io(struct io_u *io_u, int q, bool success)
{
	const struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbd_info = f->zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx;
	uint64_t zone_end;

	if (!zbd_info)
		return;

	zone_idx = zbd_zone_idx(f, io_u->offset);
	assert(zone_idx < zbd_info->nr_zones);
	z = &zbd_info->zone_info[zone_idx];

	if (z->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return;

	if (!success)
		goto unlock;

	dprint(FD_ZBD,
	       "%s: queued I/O (%lld, %llu) for zone %u\n",
	       f->file_name, io_u->offset, io_u->buflen, zone_idx);

	switch (io_u->ddir) {
	case DDIR_WRITE:
		zone_end = min((uint64_t)(io_u->offset + io_u->buflen),
			       (z + 1)->start);
		pthread_mutex_lock(&zbd_info->mutex);
		/*
		 * z->wp > zone_end means that one or more I/O errors
		 * have occurred.
		 */
		if (z->wp <= zone_end)
			zbd_info->sectors_with_data += zone_end - z->wp;
		pthread_mutex_unlock(&zbd_info->mutex);
		z->wp = zone_end;
		break;
	case DDIR_TRIM:
		assert(z->wp == z->start);
		break;
	default:
		break;
	}

unlock:
	if (!success || q != FIO_Q_QUEUED) {
		/* BUSY or COMPLETED: unlock the zone */
		pthread_mutex_unlock(&z->mutex);
		io_u->zbd_put_io = NULL;
	}
}

/**
 * zbd_put_io - Unlock an I/O unit target zone lock
 * @io_u: I/O unit
 */
static void zbd_put_io(const struct io_u *io_u)
{
	const struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbd_info = f->zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx;

	if (!zbd_info)
		return;

	zone_idx = zbd_zone_idx(f, io_u->offset);
	assert(zone_idx < zbd_info->nr_zones);
	z = &zbd_info->zone_info[zone_idx];

	if (z->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return;

	dprint(FD_ZBD,
	       "%s: terminate I/O (%lld, %llu) for zone %u\n",
	       f->file_name, io_u->offset, io_u->buflen, zone_idx);

	assert(pthread_mutex_unlock(&z->mutex) == 0);
	zbd_check_swd(f);
}

bool zbd_unaligned_write(int error_code)
{
	switch (error_code) {
	case EIO:
	case EREMOTEIO:
		return true;
	}
	return false;
}

/**
 * zbd_adjust_block - adjust the offset and length as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * Locking strategy: returns with z->mutex locked if and only if z refers
 * to a sequential zone and if io_u_accept is returned. z is the zone that
 * corresponds to io_u->offset at the end of this function.
 */
enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
{
	const struct fio_file *f = io_u->file;
	uint32_t zone_idx_b;
	struct fio_zone_info *zb, *zl, *orig_zb;
	uint32_t orig_len = io_u->buflen;
	uint32_t min_bs = td->o.min_bs[io_u->ddir];
	uint64_t new_len;
	int64_t range;

	if (!f->zbd_info)
		return io_u_accept;

	assert(is_valid_offset(f, io_u->offset));
	assert(io_u->buflen);
	zone_idx_b = zbd_zone_idx(f, io_u->offset);
	zb = &f->zbd_info->zone_info[zone_idx_b];
	orig_zb = zb;

	/* Accept the I/O offset for conventional zones. */
	if (zb->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return io_u_accept;

	/*
	 * Accept the I/O offset for reads if reading beyond the write pointer
	 * is enabled.
	 */
	if (zb->cond != BLK_ZONE_COND_OFFLINE &&
	    io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
		return io_u_accept;

	zbd_check_swd(f);

	/*
	 * Lock the io_u target zone. The zone will be unlocked if io_u offset
	 * is changed or when io_u completes and zbd_put_io() executed.
	 * To avoid multiple jobs doing asynchronous I/Os from deadlocking each
	 * other waiting for zone locks when building an io_u batch, first
	 * only trylock the zone. If the zone is already locked by another job,
	 * process the currently queued I/Os so that I/O progress is made and
	 * zones unlocked.
	 */
	if (pthread_mutex_trylock(&zb->mutex) != 0) {
		if (!td_ioengine_flagged(td, FIO_SYNCIO))
			io_u_quiesce(td);
		pthread_mutex_lock(&zb->mutex);
	}

	switch (io_u->ddir) {
	case DDIR_READ:
		if (td->runstate == TD_VERIFYING) {
			zb = zbd_replay_write_order(td, io_u, zb);
			goto accept;
		}
		/*
		 * Check that there is enough written data in the zone to do an
		 * I/O of at least min_bs B. If there isn't, find a new zone for
		 * the I/O.
		 */
		range = zb->cond != BLK_ZONE_COND_OFFLINE ?
			zb->wp - zb->start : 0;
		if (range < min_bs ||
		    ((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
			pthread_mutex_unlock(&zb->mutex);
			zl = &f->zbd_info->zone_info[zbd_zone_idx(f,
						f->file_offset + f->io_size)];
			zb = zbd_find_zone(td, io_u, zb, zl);
			if (!zb) {
				dprint(FD_ZBD,
				       "%s: zbd_find_zone(%lld, %llu) failed\n",
				       f->file_name, io_u->offset,
				       io_u->buflen);
				goto eof;
			}
			/*
			 * zbd_find_zone() returned a zone with a range of at
			 * least min_bs.
			 */
			range = zb->wp - zb->start;
			assert(range >= min_bs);

			if (!td_random(td))
				io_u->offset = zb->start;
		}
		/*
		 * Make sure the I/O is within the zone valid data range while
		 * maximizing the I/O size and preserving randomness.
		 */
		if (range <= io_u->buflen)
			io_u->offset = zb->start;
		else if (td_random(td))
			io_u->offset = zb->start +
				((io_u->offset - orig_zb->start) %
				 (range - io_u->buflen)) / min_bs * min_bs;
		/*
		 * Make sure the I/O does not cross over the zone wp position.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      (unsigned long long)(zb->wp - io_u->offset));
		new_len = new_len / min_bs * min_bs;
		if (new_len < io_u->buflen) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u into %llu\n",
			       orig_len, io_u->buflen);
		}
		assert(zb->start <= io_u->offset);
		assert(io_u->offset + io_u->buflen <= zb->wp);
		goto accept;
	case DDIR_WRITE:
		if (io_u->buflen > f->zbd_info->zone_size)
			goto eof;
		if (!zbd_open_zone(td, io_u, zone_idx_b)) {
			pthread_mutex_unlock(&zb->mutex);
			zb = zbd_convert_to_open_zone(td, io_u);
			if (!zb)
				goto eof;
			zone_idx_b = zb - f->zbd_info->zone_info;
		}
		/* Check whether the zone reset threshold has been exceeded */
		if (td->o.zrf.u.f) {
			if (f->zbd_info->sectors_with_data >=
			    f->io_size * td->o.zrt.u.f &&
			    zbd_dec_and_reset_write_cnt(td, f)) {
				zb->reset_zone = 1;
			}
		}
		/* Reset the zone pointer if necessary */
		if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
			assert(td->o.verify == VERIFY_NONE);
			/*
			 * Since previous write requests may have been submitted
			 * asynchronously and since we will submit the zone
			 * reset synchronously, wait until previously submitted
			 * write requests have completed before issuing a
			 * zone reset.
			 */
			io_u_quiesce(td);
			zb->reset_zone = 0;
			if (zbd_reset_zone(td, f, zb) < 0)
				goto eof;
		}
		/* Make writes occur at the write pointer */
		assert(!zbd_zone_full(f, zb, min_bs));
		io_u->offset = zb->wp;
		if (!is_valid_offset(f, io_u->offset)) {
			dprint(FD_ZBD, "Dropped request with offset %llu\n",
			       io_u->offset);
			goto eof;
		}
		/*
		 * Make sure that the buflen is a multiple of the minimal
		 * block size. Give up if shrinking would make the request too
		 * small.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      (zb + 1)->start - io_u->offset);
		new_len = new_len / min_bs * min_bs;
		if (new_len == io_u->buflen)
			goto accept;
		if (new_len >= min_bs) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u into %llu\n",
			       orig_len, io_u->buflen);
			goto accept;
		}
		log_err("Zone remainder %lld smaller than minimum block size %d\n",
			((zb + 1)->start - io_u->offset),
			min_bs);
		goto eof;
	case DDIR_TRIM:
		/* fall-through */
	case DDIR_SYNC:
	case DDIR_DATASYNC:
	case DDIR_SYNC_FILE_RANGE:
	case DDIR_WAIT:
	case DDIR_LAST:
	case DDIR_INVAL:
		goto accept;
	}

	assert(false);

accept:
	assert(zb);
	assert(zb->cond != BLK_ZONE_COND_OFFLINE);
	assert(!io_u->zbd_queue_io);
	assert(!io_u->zbd_put_io);
	io_u->zbd_queue_io = zbd_queue_io;
	io_u->zbd_put_io = zbd_put_io;
	return io_u_accept;

eof:
	if (zb)
		pthread_mutex_unlock(&zb->mutex);
	return io_u_eof;
}

/* Return a string with ZBD statistics */
char *zbd_write_status(const struct thread_stat *ts)
{
	char *res;

	if (asprintf(&res, "; %llu zone resets", (unsigned long long) ts->nr_zone_resets) < 0)
		return NULL;
	return res;
}