/*
 * Copyright (C) 2018 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include <sys/ioctl.h>
#include <linux/blkzoned.h>

/**
 * zbd_zone_idx - convert an offset into a zone number
 * @f: file pointer.
 * @offset: offset in bytes. If this offset is in the first zone_size bytes
 *	    past the disk size, the index of the sentinel zone is returned.
 */
static uint32_t zbd_zone_idx(const struct fio_file *f, uint64_t offset)
{
	uint32_t zone_idx;

	if (f->zbd_info->zone_size_log2 > 0)
		zone_idx = offset >> f->zbd_info->zone_size_log2;
	else
		zone_idx = (offset >> 9) / f->zbd_info->zone_size;

	return min(zone_idx, f->zbd_info->nr_zones);
}
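/*
 * Worked example (illustrative numbers): for 256 MiB zones,
 * zbd_info->zone_size is 524288 sectors and zone_size_log2 is 28
 * (ilog2(524288) + 9), so a byte offset of 1 GiB maps to zone
 * 2^30 >> 28 = 4. Zone sizes that are not a power of two take the
 * division path instead.
 */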
/**
 * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
 * @f: file pointer.
 * @z: zone info pointer.
 * @required: minimum number of bytes that must remain in a zone.
 *
 * The caller must hold z->mutex.
 */
static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
			  uint64_t required)
{
	assert((required & 511) == 0);

	return z->type == BLK_ZONE_TYPE_SEQWRITE_REQ &&
		z->wp + (required >> 9) > z->start + f->zbd_info->zone_size;
}
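/*
 * Usage sketch (hypothetical caller; zbd_open_zone() below follows the same
 * pattern): with z->mutex held, zbd_zone_full(f, z, td->o.min_bs[DDIR_WRITE])
 * reports that fewer than min_bs bytes are left between the write pointer
 * and the zone end, so the caller has to pick or open another zone for the
 * next write.
 */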
static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
{
	return (uint64_t)(offset - f->file_offset) < f->io_size;
}

/* Verify whether direct I/O is used for all host-managed zoned drives. */
static bool zbd_using_direct_io(void)
{
	struct thread_data *td;
	struct fio_file *f;
	int i, j;

	for_each_td(td, i) {
		if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
			continue;
		for_each_file(td, f, j) {
			if (f->zbd_info &&
			    f->zbd_info->model == ZBD_DM_HOST_MANAGED)
				return false;
		}
	}

	return true;
}
/* Whether or not the I/O range for f includes one or more sequential zones */
static bool zbd_is_seq_job(struct fio_file *f)
{
	uint32_t zone_idx, zone_idx_b, zone_idx_e;

	assert(f->zbd_info);
	zone_idx_b = zbd_zone_idx(f, f->file_offset);
	zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size - 1);
	for (zone_idx = zone_idx_b; zone_idx <= zone_idx_e; zone_idx++)
		if (f->zbd_info->zone_info[zone_idx].type ==
		    BLK_ZONE_TYPE_SEQWRITE_REQ)
			return true;

	return false;
}
/*
 * Verify whether offset and size parameters are aligned with zone boundaries.
 */
static bool zbd_verify_sizes(void)
{
	const struct fio_zone_info *z;
	struct thread_data *td;
	struct fio_file *f;
	uint64_t new_offset, new_end;
	uint32_t zone_idx;
	int i, j;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			if (!f->zbd_info)
				continue;
			if (f->file_offset >= f->real_file_size)
				continue;
			if (!zbd_is_seq_job(f))
				continue;
			zone_idx = zbd_zone_idx(f, f->file_offset);
			z = &f->zbd_info->zone_info[zone_idx];
			if (f->file_offset != (z->start << 9)) {
				new_offset = (z+1)->start << 9;
				if (new_offset >= f->file_offset + f->io_size) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded up offset from %lu to %lu\n",
					 f->file_name, f->file_offset,
					 new_offset);
				f->io_size -= (new_offset - f->file_offset);
				f->file_offset = new_offset;
			}
			zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
			z = &f->zbd_info->zone_info[zone_idx];
			new_end = z->start << 9;
			if (f->file_offset + f->io_size != new_end) {
				if (new_end <= f->file_offset) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded down io_size from %lu to %lu\n",
					 f->file_name, f->io_size,
					 new_end - f->file_offset);
				f->io_size = new_end - f->file_offset;
			}
		}
	}

	return true;
}
static bool zbd_verify_bs(void)
{
	struct thread_data *td;
	struct fio_file *f;
	uint32_t i, j, k;
	int zone_size;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			if (!f->zbd_info)
				continue;
			zone_size = f->zbd_info->zone_size;
			for (k = 0; k < ARRAY_SIZE(td->o.bs); k++) {
				if (td->o.verify != VERIFY_NONE &&
				    (zone_size << 9) % td->o.bs[k] != 0) {
					log_info("%s: block size %llu is not a divisor of the zone size %d\n",
						 f->file_name, td->o.bs[k],
						 zone_size << 9);
					return false;
				}
			}
		}
	}

	return true;
}
/*
 * Read zone information into @buf starting from sector @start_sector.
 * @fd is a file descriptor that refers to a block device and @bufsz is the
 * size of @buf in bytes.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int read_zone_info(int fd, uint64_t start_sector,
			  void *buf, unsigned int bufsz)
{
	struct blk_zone_report *hdr = buf;

	if (bufsz < sizeof(*hdr))
		return -EINVAL;

	memset(hdr, 0, sizeof(*hdr));
	hdr->nr_zones = (bufsz - sizeof(*hdr)) / sizeof(struct blk_zone);
	hdr->sector = start_sector;
	return ioctl(fd, BLKREPORTZONE, hdr) >= 0 ? 0 : -errno;
}
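/*
 * Usage sketch (hypothetical caller; 'fd' refers to an open zoned block
 * device):
 *
 *	char buf[sizeof(struct blk_zone_report) + 16 * sizeof(struct blk_zone)];
 *
 *	if (read_zone_info(fd, 0, buf, sizeof(buf)) == 0) {
 *		struct blk_zone_report *hdr = (void *)buf;
 *		struct blk_zone *z = (void *)(hdr + 1);
 *		// The kernel updated hdr->nr_zones to the number of
 *		// struct blk_zone entries actually reported, starting at z.
 *	}
 */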
/*
 * Read up to 255 characters from the first line of a file. Strip the trailing
 * newline.
 */
static char *read_file(const char *path)
{
	char line[256], *p = line;
	FILE *f;

	f = fopen(path, "rb");
	if (!f)
		return NULL;
	if (!fgets(line, sizeof(line), f))
		line[0] = '\0';
	strsep(&p, "\n");
	fclose(f);

	return strdup(line);
}
static enum blk_zoned_model get_zbd_model(const char *file_name)
{
	enum blk_zoned_model model = ZBD_DM_NONE;
	char *zoned_attr_path = NULL;
	char *model_str = NULL;
	struct stat statbuf;

	if (stat(file_name, &statbuf) < 0)
		goto out;
	if (asprintf(&zoned_attr_path, "/sys/dev/block/%d:%d/queue/zoned",
		     major(statbuf.st_rdev), minor(statbuf.st_rdev)) < 0)
		goto out;
	model_str = read_file(zoned_attr_path);
	if (!model_str)
		goto out;
	dprint(FD_ZBD, "%s: zbd model string: %s\n", file_name, model_str);
	if (strcmp(model_str, "host-aware") == 0)
		model = ZBD_DM_HOST_AWARE;
	else if (strcmp(model_str, "host-managed") == 0)
		model = ZBD_DM_HOST_MANAGED;

out:
	free(model_str);
	free(zoned_attr_path);
	return model;
}
static int ilog2(uint64_t i)
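{
	/*
	 * Minimal implementation sketch (assumed; matches the ilog2()
	 * semantics used for zone_size_log2 by init_zone_info() and
	 * parse_zone_info() below): position of the most significant set
	 * bit, or -1 when i == 0. Exact for powers of two.
	 */
	int log = -1;

	while (i) {
		i >>= 1;
		log++;
	}
	return log;
}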
/*
 * Initialize f->zbd_info for devices that are not zoned block devices. This
 * allows a ZBD workload to be run against a non-ZBD device.
 */
static int init_zone_info(struct thread_data *td, struct fio_file *f)
{
	uint32_t nr_zones;
	struct fio_zone_info *p;
	uint64_t zone_size;
	struct zoned_block_device_info *zbd_info = NULL;
	pthread_mutexattr_t attr;
	int i;

	zone_size = td->o.zone_size >> 9;
	assert(zone_size);
	nr_zones = ((f->real_file_size >> 9) + zone_size - 1) / zone_size;
	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	if (!zbd_info)
		return -ENOMEM;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setpshared(&attr, true);
	pthread_mutex_init(&zbd_info->mutex, &attr);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (i = 0; i < nr_zones; i++, p++) {
		pthread_mutex_init(&p->mutex, &attr);
		p->start = i * zone_size;
		p->wp = p->start + zone_size;
		p->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		p->cond = BLK_ZONE_COND_EMPTY;
	}
	/* a sentinel */
	p->start = nr_zones * zone_size;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) + 9 : -1;
	f->zbd_info->nr_zones = nr_zones;
	pthread_mutexattr_destroy(&attr);
	return 0;
}
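/*
 * Example (illustrative numbers): with --zonesize=256M against a 10 GiB
 * regular block device, zone_size is 524288 sectors and nr_zones is 40.
 * One extra array element is allocated for the sentinel entry whose start
 * marks the end of the device.
 */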
/*
 * Parse the BLKREPORTZONE output and store it in f->zbd_info. Must be called
 * only for devices that support this ioctl, namely zoned block devices.
 */
static int parse_zone_info(struct thread_data *td, struct fio_file *f)
{
	const unsigned int bufsz = sizeof(struct blk_zone_report) +
		4096 * sizeof(struct blk_zone);
	uint32_t nr_zones;
	struct blk_zone_report *hdr;
	const struct blk_zone *z;
	struct fio_zone_info *p;
	uint64_t zone_size, start_sector;
	struct zoned_block_device_info *zbd_info = NULL;
	pthread_mutexattr_t attr;
	void *buf;
	int fd, i, j, ret = 0;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setpshared(&attr, true);

	buf = malloc(bufsz);
	ret = -ENOMEM;
	if (!buf)
		goto out;

	fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
	if (fd < 0) {
		ret = -errno;
		goto free;
	}

	ret = read_zone_info(fd, 0, buf, bufsz);
	if (ret < 0) {
		log_info("fio: BLKREPORTZONE(%lu) failed for %s (%d).\n",
			 0UL, f->file_name, -ret);
		goto close;
	}
	hdr = buf;
	if (hdr->nr_zones < 1) {
		log_info("fio: %s has invalid zone information.\n",
			 f->file_name);
		ret = -EINVAL;
		goto close;
	}
	z = (void *)(hdr + 1);
	zone_size = z->len;
	nr_zones = ((f->real_file_size >> 9) + zone_size - 1) / zone_size;

	if (td->o.zone_size == 0) {
		td->o.zone_size = zone_size << 9;
	} else if (td->o.zone_size != zone_size << 9) {
		log_info("fio: %s job parameter zonesize %lld does not match disk zone size %ld.\n",
			 f->file_name, td->o.zone_size, zone_size << 9);
		ret = -EINVAL;
		goto close;
	}

	dprint(FD_ZBD, "Device %s has %d zones of size %lu KB\n", f->file_name,
	       nr_zones, zone_size / 2);

	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	ret = -ENOMEM;
	if (!zbd_info)
		goto close;
	pthread_mutex_init(&zbd_info->mutex, &attr);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (start_sector = 0, j = 0; j < nr_zones;) {
		z = (void *)(hdr + 1);
		for (i = 0; i < hdr->nr_zones; i++, j++, z++, p++) {
			pthread_mutex_init(&p->mutex, &attr);
			p->start = z->start;
			switch (z->cond) {
			case BLK_ZONE_COND_NOT_WP:
				p->wp = z->start;
				break;
			case BLK_ZONE_COND_FULL:
				p->wp = z->start + zone_size;
				break;
			default:
				assert(z->start <= z->wp);
				assert(z->wp <= z->start + zone_size);
				p->wp = z->wp;
				break;
			}
			p->type = z->type;
			p->cond = z->cond;
			if (j > 0 && p->start != p[-1].start + zone_size) {
				log_info("%s: invalid zone data\n",
					 f->file_name);
				ret = -EINVAL;
				goto close;
			}
		}
		z--;
		start_sector = z->start + z->len;
		if (j >= nr_zones)
			break;
		ret = read_zone_info(fd, start_sector, buf, bufsz);
		if (ret < 0) {
			log_info("fio: BLKREPORTZONE(%lu) failed for %s (%d).\n",
				 start_sector, f->file_name, -ret);
			goto close;
		}
	}
	/* a sentinel */
	zbd_info->zone_info[nr_zones].start = start_sector;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) + 9 : -1;
	f->zbd_info->nr_zones = nr_zones;
	zbd_info = NULL;
	ret = 0;

close:
	sfree(zbd_info);
	close(fd);
free:
	free(buf);
out:
	pthread_mutexattr_destroy(&attr);
	return ret;
}
/*
 * Allocate zone information and store it into f->zbd_info if zonemode=zbd.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
{
	enum blk_zoned_model zbd_model;
	int ret = 0;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);

	zbd_model = get_zbd_model(f->file_name);
	switch (zbd_model) {
	case ZBD_DM_HOST_AWARE:
	case ZBD_DM_HOST_MANAGED:
		ret = parse_zone_info(td, f);
		break;
	case ZBD_DM_NONE:
		ret = init_zone_info(td, f);
		break;
	}
	if (ret == 0)
		f->zbd_info->model = zbd_model;
	return ret;
}

void zbd_free_zone_info(struct fio_file *f)
{
	uint32_t refcount;

	if (!f->zbd_info)
		return;

	pthread_mutex_lock(&f->zbd_info->mutex);
	refcount = --f->zbd_info->refcount;
	pthread_mutex_unlock(&f->zbd_info->mutex);

	assert((int32_t)refcount >= 0);
	if (refcount == 0)
		sfree(f->zbd_info);
	f->zbd_info = NULL;
}
/*
 * Initialize f->zbd_info.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * Note: this function can only work correctly if it is called before the first
 * fio fork() call.
 */
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
	struct thread_data *td2;
	struct fio_file *f2;
	int i, j, ret;

	for_each_td(td2, i) {
		for_each_file(td2, f2, j) {
			if (td2 == td && f2 == file)
				continue;
			if (!f2->zbd_info ||
			    strcmp(f2->file_name, file->file_name) != 0)
				continue;
			file->zbd_info = f2->zbd_info;
			file->zbd_info->refcount++;
			return 0;
		}
	}

	ret = zbd_create_zone_info(td, file);
	if (ret < 0)
		td_verror(td, -ret, "BLKREPORTZONE failed");
	return ret;
}
int zbd_init(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	for_each_file(td, f, i) {
		if (f->filetype != FIO_TYPE_BLOCK)
			continue;
		if (td->o.zone_size && td->o.zone_size < 512) {
			log_err("%s: zone size must be at least 512 bytes for --zonemode=zbd\n\n",
				f->file_name);
			return 1;
		}
		if (td->o.zone_size == 0 &&
		    get_zbd_model(f->file_name) == ZBD_DM_NONE) {
			log_err("%s: Specifying the zone size is mandatory for regular block devices with --zonemode=zbd\n\n",
				f->file_name);
			return 1;
		}
		zbd_init_zone_info(td, f);
	}

	if (!zbd_using_direct_io()) {
		log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
		return 1;
	}

	if (!zbd_verify_sizes())
		return 1;

	if (!zbd_verify_bs())
		return 1;

	return 0;
}
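/*
 * Example invocation that exercises this initialization path (the device
 * path is hypothetical):
 *
 *	fio --name=seqwrite --filename=/dev/sdX --direct=1 --zonemode=zbd \
 *	    --rw=write --bs=128k
 *
 * For a regular block device, --zonesize (e.g. --zonesize=256M) must be
 * given as well, since the zone size cannot be read from sysfs in that case.
 */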
/**
 * zbd_reset_range - reset zones for a range of sectors
 * @td: FIO thread data.
 * @f: fio file for which to reset zones
 * @sector: Starting sector in units of 512 bytes
 * @nr_sectors: Number of sectors in units of 512 bytes
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_range(struct thread_data *td, const struct fio_file *f,
			   uint64_t sector, uint64_t nr_sectors)
{
	struct blk_zone_range zr = {
		.sector = sector,
		.nr_sectors = nr_sectors,
	};
	uint32_t zone_idx_b, zone_idx_e;
	struct fio_zone_info *zb, *ze, *z;
	int ret = 0;

	assert(is_valid_offset(f, ((sector + nr_sectors) << 9) - 1));
	switch (f->zbd_info->model) {
	case ZBD_DM_HOST_AWARE:
	case ZBD_DM_HOST_MANAGED:
		ret = ioctl(f->fd, BLKRESETZONE, &zr);
		if (ret < 0) {
			td_verror(td, errno, "resetting wp failed");
			log_err("%s: resetting wp for %llu sectors at sector %llu failed (%d).\n",
				f->file_name, zr.nr_sectors, zr.sector, errno);
			return ret;
		}
		break;
	case ZBD_DM_NONE:
		break;
	}

	zone_idx_b = zbd_zone_idx(f, sector << 9);
	zb = &f->zbd_info->zone_info[zone_idx_b];
	zone_idx_e = zbd_zone_idx(f, (sector + nr_sectors) << 9);
	ze = &f->zbd_info->zone_info[zone_idx_e];
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		z->wp = z->start;
		z->verify_block = 0;
		pthread_mutex_unlock(&z->mutex);
	}

	td->ts.nr_zone_resets += ze - zb;

	return ret;
}
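/*
 * Example (illustrative numbers): resetting the third 256 MiB zone of a
 * device translates to zr.sector = 1048576 and zr.nr_sectors = 524288,
 * i.e. values that are exactly zone-aligned; the kernel rejects
 * BLKRESETZONE ranges that do not start and end on zone boundaries.
 */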
/**
 * zbd_reset_zone - reset the write pointer of a single zone
 * @td: FIO thread data.
 * @f: FIO file associated with the disk for which to reset a write pointer.
 * @z: Zone to reset.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_zone(struct thread_data *td, const struct fio_file *f,
			  struct fio_zone_info *z)
{
	int ret;

	dprint(FD_ZBD, "%s: resetting wp of zone %lu.\n", f->file_name,
	       z - f->zbd_info->zone_info);
	ret = zbd_reset_range(td, f, z->start, (z+1)->start - z->start);
	return ret;
}
/*
 * Reset a range of zones. Returns 0 upon success and 1 upon failure.
 * @td: fio thread data.
 * @f: fio file for which to reset zones
 * @zb: first zone to reset.
 * @ze: first zone not to reset.
 * @all_zones: whether to reset all zones or only those zones for which the
 *	write pointer is not a multiple of td->o.min_bs[DDIR_WRITE].
 */
static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
			   struct fio_zone_info *const zb,
			   struct fio_zone_info *const ze, bool all_zones)
{
	struct fio_zone_info *z, *start_z = ze;
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE] >> 9;
	bool reset_wp;
	int res = 0;

	dprint(FD_ZBD, "%s: examining zones %lu .. %lu\n", f->file_name,
	       zb - f->zbd_info->zone_info, ze - f->zbd_info->zone_info);
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		switch (z->type) {
		case BLK_ZONE_TYPE_SEQWRITE_REQ:
			reset_wp = all_zones ? z->wp != z->start :
					(td->o.td_ddir & TD_DDIR_WRITE) &&
					z->wp % min_bs != 0;
			if (start_z == ze && reset_wp) {
				start_z = z;
			} else if (start_z < ze && !reset_wp) {
				dprint(FD_ZBD,
				       "%s: resetting zones %lu .. %lu\n",
				       f->file_name,
				       start_z - f->zbd_info->zone_info,
				       z - f->zbd_info->zone_info);
				if (zbd_reset_range(td, f, start_z->start,
						z->start - start_z->start) < 0)
					res = 1;
				start_z = ze;
			}
			break;
		default:
			if (start_z == ze)
				break;
			dprint(FD_ZBD, "%s: resetting zones %lu .. %lu\n",
			       f->file_name, start_z - f->zbd_info->zone_info,
			       z - f->zbd_info->zone_info);
			if (zbd_reset_range(td, f, start_z->start,
					    z->start - start_z->start) < 0)
				res = 1;
			start_z = ze;
			break;
		}
	}
	if (start_z < ze) {
		dprint(FD_ZBD, "%s: resetting zones %lu .. %lu\n", f->file_name,
		       start_z - f->zbd_info->zone_info,
		       z - f->zbd_info->zone_info);
		if (zbd_reset_range(td, f, start_z->start,
				    z->start - start_z->start) < 0)
			res = 1;
	}
	for (z = zb; z < ze; z++)
		pthread_mutex_unlock(&z->mutex);

	return res;
}
void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
	struct fio_zone_info *zb, *ze;
	uint32_t zone_idx_e;

	if (!f->zbd_info)
		return;

	zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
	zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size);
	ze = &f->zbd_info->zone_info[zone_idx_e];
	/*
	 * If data verification is enabled, reset the affected zones before
	 * writing any data so that no zone reset has to be issued while data
	 * is being written, since such a reset would cause data loss.
	 */
	zbd_reset_zones(td, f, zb, ze, td->o.verify != VERIFY_NONE &&
			(td->o.td_ddir & TD_DDIR_WRITE) &&
			td->runstate != TD_VERIFYING);
}
/* The caller must hold f->zbd_info->mutex. */
static bool is_zone_open(const struct thread_data *td, const struct fio_file *f,
			 unsigned int zone_idx)
{
	struct zoned_block_device_info *zbdi = f->zbd_info;
	int i;

	assert(td->o.max_open_zones <= ARRAY_SIZE(zbdi->open_zones));
	assert(zbdi->num_open_zones <= td->o.max_open_zones);

	for (i = 0; i < zbdi->num_open_zones; i++)
		if (zbdi->open_zones[i] == zone_idx)
			return true;

	return false;
}
/*
 * Open a ZBD zone if it was not yet open. Returns true if either the zone was
 * already open or if opening a new zone is allowed. Returns false if the zone
 * was not yet open and opening a new zone would cause the zone limit to be
 * exceeded.
 */
static bool zbd_open_zone(struct thread_data *td, const struct io_u *io_u,
			  uint32_t zone_idx)
{
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z = &f->zbd_info->zone_info[zone_idx];
	bool res = true;

	if (z->cond == BLK_ZONE_COND_OFFLINE)
		return false;

	/*
	 * Skip full zones with data verification enabled because resetting a
	 * zone causes data loss and hence causes verification to fail.
	 */
	if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
		return false;

	/* Zero means no limit */
	if (!td->o.max_open_zones)
		return true;

	pthread_mutex_lock(&f->zbd_info->mutex);
	if (is_zone_open(td, f, zone_idx))
		goto out;
	res = false;
	if (f->zbd_info->num_open_zones >= td->o.max_open_zones)
		goto out;
	dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
	f->zbd_info->open_zones[f->zbd_info->num_open_zones++] = zone_idx;
	z->open = 1;
	res = true;

out:
	pthread_mutex_unlock(&f->zbd_info->mutex);
	return res;
}
/* The caller must hold f->zbd_info->mutex */
static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
			   unsigned int open_zone_idx)
{
	uint32_t zone_idx;

	assert(open_zone_idx < f->zbd_info->num_open_zones);
	zone_idx = f->zbd_info->open_zones[open_zone_idx];
	memmove(f->zbd_info->open_zones + open_zone_idx,
		f->zbd_info->open_zones + open_zone_idx + 1,
		(FIO_MAX_OPEN_ZBD_ZONES - (open_zone_idx + 1)) *
		sizeof(f->zbd_info->open_zones[0]));
	f->zbd_info->num_open_zones--;
	f->zbd_info->zone_info[zone_idx].open = 0;
}
/*
 * Modify the offset of an I/O unit that does not refer to an open zone such
 * that it refers to an open zone. Close an open zone and open a new zone if
 * necessary. This algorithm can only work correctly if all write pointers are
 * a multiple of the fio block size. The caller must neither hold z->mutex
 * nor f->zbd_info->mutex. Returns with z->mutex held upon success.
 */
struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
					       struct io_u *io_u)
{
	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z;
	unsigned int open_zone_idx = -1;
	uint32_t zone_idx, new_zone_idx;
	int i;

	assert(is_valid_offset(f, io_u->offset));

	if (td->o.max_open_zones) {
		/*
		 * This statement accesses f->zbd_info->open_zones[] on purpose
		 * without locking.
		 */
		zone_idx = f->zbd_info->open_zones[(io_u->offset -
						    f->file_offset) *
				f->zbd_info->num_open_zones / f->io_size];
	} else {
		zone_idx = zbd_zone_idx(f, io_u->offset);
	}
	dprint(FD_ZBD, "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
	       __func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);

	/*
	 * Since z->mutex is the outer lock and f->zbd_info->mutex the inner
	 * lock it can happen that the state of the zone with index zone_idx
	 * has changed after 'z' has been assigned and before f->zbd_info->mutex
	 * has been obtained. Hence the loop.
	 */
	for (;;) {
		z = &f->zbd_info->zone_info[zone_idx];

		pthread_mutex_lock(&z->mutex);
		pthread_mutex_lock(&f->zbd_info->mutex);
		if (td->o.max_open_zones == 0)
			goto examine_zone;
		if (f->zbd_info->num_open_zones == 0) {
			pthread_mutex_unlock(&f->zbd_info->mutex);
			pthread_mutex_unlock(&z->mutex);
			dprint(FD_ZBD, "%s(%s): no zones are open\n",
			       __func__, f->file_name);
			return NULL;
		}
		open_zone_idx = (io_u->offset - f->file_offset) *
			f->zbd_info->num_open_zones / f->io_size;
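		/*
		 * Worked example (illustrative): with file_offset 0, io_size
		 * spanning the whole device and 4 open zones, an io_u->offset
		 * halfway into the device selects open_zones[2]; offsets are
		 * thus spread linearly over the list of open zones.
		 */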
		assert(open_zone_idx < f->zbd_info->num_open_zones);
		new_zone_idx = f->zbd_info->open_zones[open_zone_idx];
		if (new_zone_idx == zone_idx)
			break;
		zone_idx = new_zone_idx;
		pthread_mutex_unlock(&f->zbd_info->mutex);
		pthread_mutex_unlock(&z->mutex);
	}

	/* Both z->mutex and f->zbd_info->mutex are held. */

examine_zone:
	if ((z->wp << 9) + min_bs <= ((z+1)->start << 9)) {
		pthread_mutex_unlock(&f->zbd_info->mutex);
		goto out;
	}
	dprint(FD_ZBD, "%s(%s): closing zone %d\n", __func__, f->file_name,
	       zone_idx);
	if (td->o.max_open_zones)
		zbd_close_zone(td, f, open_zone_idx);
	pthread_mutex_unlock(&f->zbd_info->mutex);

	/* Only z->mutex is held. */

	/* Zone 'z' is full, so try to open a new zone. */
	for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
		zone_idx++;
		pthread_mutex_unlock(&z->mutex);
		z++;
		if (!is_valid_offset(f, z->start << 9)) {
			/* Wrap-around. */
			zone_idx = zbd_zone_idx(f, f->file_offset);
			z = &f->zbd_info->zone_info[zone_idx];
		}
		assert(is_valid_offset(f, z->start << 9));
		pthread_mutex_lock(&z->mutex);
		if (z->open)
			continue;
		if (zbd_open_zone(td, io_u, zone_idx))
			goto out;
	}

	/* Only z->mutex is held. */

	/* Check whether the write fits in any of the already opened zones. */
	pthread_mutex_lock(&f->zbd_info->mutex);
	for (i = 0; i < f->zbd_info->num_open_zones; i++) {
		zone_idx = f->zbd_info->open_zones[i];
		pthread_mutex_unlock(&f->zbd_info->mutex);
		pthread_mutex_unlock(&z->mutex);

		z = &f->zbd_info->zone_info[zone_idx];

		pthread_mutex_lock(&z->mutex);
		if ((z->wp << 9) + min_bs <= ((z+1)->start << 9))
			goto out;
		pthread_mutex_lock(&f->zbd_info->mutex);
	}
	pthread_mutex_unlock(&f->zbd_info->mutex);
	pthread_mutex_unlock(&z->mutex);
	dprint(FD_ZBD, "%s(%s): did not open another zone\n", __func__,
	       f->file_name);
	return NULL;

out:
	dprint(FD_ZBD, "%s(%s): returning zone %d\n", __func__, f->file_name,
	       zone_idx);
	io_u->offset = z->start << 9;
	return z;
}
/* The caller must hold z->mutex. */
static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
						    struct io_u *io_u,
						    struct fio_zone_info *z)
{
	const struct fio_file *f = io_u->file;
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];

	if (!zbd_open_zone(td, io_u, z - f->zbd_info->zone_info)) {
		pthread_mutex_unlock(&z->mutex);
		z = zbd_convert_to_open_zone(td, io_u);
		assert(z);
	}

	if (z->verify_block * min_bs >= f->zbd_info->zone_size)
		log_err("%s: %d * %d >= %ld\n", f->file_name, z->verify_block,
			min_bs, f->zbd_info->zone_size);
	io_u->offset = (z->start << 9) + z->verify_block++ * min_bs;
	return z;
}
/*
 * Find another zone for which @io_u fits below the write pointer. Start
 * searching in zones @zb + 1 .. @zl and continue searching in zones
 * @zb - 1 .. @zf.
 *
 * Either returns NULL or returns a zone pointer and holds the mutex for that
 * zone.
 */
static struct fio_zone_info *
zbd_find_zone(struct thread_data *td, struct io_u *io_u,
	      struct fio_zone_info *zb, struct fio_zone_info *zl)
{
	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z1, *z2;
	const struct fio_zone_info *const zf =
		&f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];

	/*
	 * Skip to the next non-empty zone in case of sequential I/O and to
	 * the nearest non-empty zone in case of random I/O.
	 */
	for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
		if (z1 < zl && z1->cond != BLK_ZONE_COND_OFFLINE) {
			pthread_mutex_lock(&z1->mutex);
			if (z1->start + (min_bs >> 9) <= z1->wp)
				return z1;
			pthread_mutex_unlock(&z1->mutex);
		} else if (!td_random(td)) {
			break;
		}
		if (td_random(td) && z2 >= zf &&
		    z2->cond != BLK_ZONE_COND_OFFLINE) {
			pthread_mutex_lock(&z2->mutex);
			if (z2->start + (min_bs >> 9) <= z2->wp)
				return z2;
			pthread_mutex_unlock(&z2->mutex);
		}
	}
	dprint(FD_ZBD, "%s: adjusting random read offset failed\n",
	       f->file_name);
	return NULL;
}
/**
 * zbd_post_submit - update the write pointer and unlock the zone lock
 * @io_u: I/O unit
 * @success: Whether or not the I/O unit has been executed successfully
 *
 * For write and trim operations, update the write pointer of all affected
 * zones.
 */
static void zbd_post_submit(const struct io_u *io_u, bool success)
{
	struct zoned_block_device_info *zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx;
	uint64_t end, zone_end;

	zbd_info = io_u->file->zbd_info;
	if (!zbd_info)
		return;

	zone_idx = zbd_zone_idx(io_u->file, io_u->offset);
	end = (io_u->offset + io_u->buflen) >> 9;
	z = &zbd_info->zone_info[zone_idx];
	assert(zone_idx < zbd_info->nr_zones);
	if (z->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return;
	if (!success)
		goto unlock;
	switch (io_u->ddir) {
	case DDIR_WRITE:
		zone_end = min(end, (z + 1)->start);
		z->wp = zone_end;
		break;
	case DDIR_TRIM:
		assert(z->wp == z->start);
		break;
	default:
		break;
	}
unlock:
	pthread_mutex_unlock(&z->mutex);
}
bool zbd_unaligned_write(int error_code)
{
	switch (error_code) {
	case EIO:
	case EREMOTEIO:
		return true;
	}
	return false;
}
/**
 * zbd_adjust_block - adjust the offset and length as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * Locking strategy: returns with z->mutex locked if and only if z refers
 * to a sequential zone and if io_u_accept is returned. z is the zone that
 * corresponds to io_u->offset at the end of this function.
 */
enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
{
	const struct fio_file *f = io_u->file;
	uint32_t zone_idx_b;
	struct fio_zone_info *zb, *zl;
	uint32_t orig_len = io_u->buflen;
	uint32_t min_bs = td->o.min_bs[io_u->ddir];
	uint64_t new_len;
	int64_t range;

	if (!f->zbd_info)
		return io_u_accept;

	assert(is_valid_offset(f, io_u->offset));
	assert(io_u->buflen);
	zone_idx_b = zbd_zone_idx(f, io_u->offset);
	zb = &f->zbd_info->zone_info[zone_idx_b];

	/* Accept the I/O offset for conventional zones. */
	if (zb->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return io_u_accept;

	/*
	 * Accept the I/O offset for reads if reading beyond the write pointer
	 * is enabled.
	 */
	if (zb->cond != BLK_ZONE_COND_OFFLINE &&
	    io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
		return io_u_accept;

	pthread_mutex_lock(&zb->mutex);
	switch (io_u->ddir) {
	case DDIR_READ:
		if (td->runstate == TD_VERIFYING) {
			zb = zbd_replay_write_order(td, io_u, zb);
			goto accept;
		}
		/*
		 * Avoid reads past the write pointer because such reads do not
		 * return valid data.
		 */
		range = zb->cond != BLK_ZONE_COND_OFFLINE ?
			((zb->wp - zb->start) << 9) - io_u->buflen : 0;
		if (td_random(td) && range >= 0) {
			io_u->offset = (zb->start << 9) +
				((io_u->offset - (zb->start << 9)) %
				 (range + 1)) / min_bs * min_bs;
			assert(zb->start << 9 <= io_u->offset);
			assert(io_u->offset + io_u->buflen <= zb->wp << 9);
			goto accept;
		}
		if (zb->cond == BLK_ZONE_COND_OFFLINE ||
		    (io_u->offset + io_u->buflen) >> 9 > zb->wp) {
			pthread_mutex_unlock(&zb->mutex);
			zl = &f->zbd_info->zone_info[zbd_zone_idx(f,
						f->file_offset + f->io_size)];
			zb = zbd_find_zone(td, io_u, zb, zl);
			if (!zb) {
				dprint(FD_ZBD,
				       "%s: zbd_find_zone(%lld, %llu) failed\n",
				       f->file_name, io_u->offset,
				       io_u->buflen);
				goto eof;
			}
			io_u->offset = zb->start << 9;
		}
		if ((io_u->offset + io_u->buflen) >> 9 > zb->wp) {
			dprint(FD_ZBD, "%s: %lld + %lld > %" PRIu64 "\n",
			       f->file_name, io_u->offset, io_u->buflen,
			       zb->wp);
			goto eof;
		}
		goto accept;
	case DDIR_WRITE:
		if (io_u->buflen > (f->zbd_info->zone_size << 9))
			goto eof;
		if (!zbd_open_zone(td, io_u, zone_idx_b)) {
			pthread_mutex_unlock(&zb->mutex);
			zb = zbd_convert_to_open_zone(td, io_u);
			if (!zb)
				goto eof;
			zone_idx_b = zb - f->zbd_info->zone_info;
		}
		/* Reset the zone pointer if necessary */
		if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
			assert(td->o.verify == VERIFY_NONE);
			/*
			 * Since previous write requests may have been submitted
			 * asynchronously and since we will submit the zone
			 * reset synchronously, wait until previously submitted
			 * write requests have completed before issuing a zone
			 * reset.
			 */
			io_u_quiesce(td);
			zb->reset_zone = 0;
			if (zbd_reset_zone(td, f, zb) < 0)
				goto eof;
		}
		/* Make writes occur at the write pointer */
		assert(!zbd_zone_full(f, zb, min_bs));
		io_u->offset = zb->wp << 9;
		if (!is_valid_offset(f, io_u->offset)) {
			dprint(FD_ZBD, "Dropped request with offset %llu\n",
			       io_u->offset);
			goto eof;
		}
		/*
		 * Make sure that the buflen is a multiple of the minimal
		 * block size. Give up if shrinking would make the request too
		 * small.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      ((zb + 1)->start << 9) - io_u->offset);
		new_len = new_len / min_bs * min_bs;
		if (new_len == io_u->buflen)
			goto accept;
		if (new_len >= min_bs) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u into %llu\n",
			       orig_len, io_u->buflen);
			goto accept;
		}
		log_err("Zone remainder %lld smaller than minimum block size %d\n",
			(((zb + 1)->start << 9) - io_u->offset),
			min_bs);
		goto eof;
	case DDIR_TRIM:
		/* fall-through */
	case DDIR_SYNC_FILE_RANGE:
		/* fall-through */
	default:
		goto accept;
	}

accept:
	assert(zb);
	assert(zb->cond != BLK_ZONE_COND_OFFLINE);
	assert(!io_u->post_submit);
	io_u->post_submit = zbd_post_submit;
	return io_u_accept;

eof:
	if (zb)
		pthread_mutex_unlock(&zb->mutex);
	return io_u_eof;
}
/* Return a string with ZBD statistics */
char *zbd_write_status(const struct thread_stat *ts)
{
	char *res;

	if (asprintf(&res, "; %ld zone resets", ts->nr_zone_resets) < 0)
		return NULL;
	return res;
}
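/*
 * Example of the fragment this appends to the job status line
 * (illustrative count): "; 17 zone resets".
 */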