 * Copyright (C) 2018 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.

#include "oslib/asprintf.h"
/**
 * zbd_get_zoned_model - Get a device zoned model
 * @td: FIO thread data
 * @f: FIO file for which to get model information
 */
int zbd_get_zoned_model(struct thread_data *td, struct fio_file *f,
			enum zbd_zoned_model *model)
{
	int ret;
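	/*
	 * Prefer the ioengine-specific ->get_zoned_model() hook when the
	 * engine provides one; otherwise fall back to the generic blkzoned
	 * implementation below.
	 */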
	if (td->io_ops && td->io_ops->get_zoned_model)
		ret = td->io_ops->get_zoned_model(td, f, model);
	else
		ret = blkzoned_get_zoned_model(td, f, model);

	if (ret < 0) {
		td_verror(td, errno, "get zoned model failed");
		log_err("%s: get zoned model failed (%d).\n",
			f->file_name, errno);
	}

	return ret;
}
/**
 * zbd_report_zones - Get zone information
 * @td: FIO thread data.
 * @f: FIO file for which to get zone information
 * @offset: offset from which to report zones
 * @zones: Array of struct zbd_zone
 * @nr_zones: Size of @zones array
 *
 * Get zone information into @zones starting from the zone at offset @offset
 * for the device specified by @f.
 *
 * Returns the number of zones reported upon success and a negative error code
 * upon failure. If the zone report is empty, always assume an error (device
 * problem) and return -EIO.
 */
int zbd_report_zones(struct thread_data *td, struct fio_file *f,
		     uint64_t offset, struct zbd_zone *zones,
		     unsigned int nr_zones)
{
	int ret;

	if (td->io_ops && td->io_ops->report_zones)
		ret = td->io_ops->report_zones(td, f, offset, zones, nr_zones);
	else
		ret = blkzoned_report_zones(td, f, offset, zones, nr_zones);

	if (ret < 0) {
		td_verror(td, errno, "report zones failed");
		log_err("%s: report zones from sector %llu failed (%d).\n",
			f->file_name, (unsigned long long)offset >> 9, errno);
	} else if (ret == 0) {
		td_verror(td, errno, "Empty zone report");
		log_err("%s: report zones from sector %llu is empty.\n",
			f->file_name, (unsigned long long)offset >> 9);
		ret = -EIO;
	}

	return ret;
}
/**
 * zbd_reset_wp - reset the write pointer of a range of zones
 * @td: FIO thread data.
 * @f: FIO file for which to reset zones
 * @offset: Starting offset of the first zone to reset
 * @length: Length of the range of zones to reset
 *
 * Reset the write pointer of all zones in the range @offset...@offset+@length.
 * Returns 0 upon success and a negative error code upon failure.
 */
int zbd_reset_wp(struct thread_data *td, struct fio_file *f,
		 uint64_t offset, uint64_t length)
{
	int ret;

	if (td->io_ops && td->io_ops->reset_wp)
		ret = td->io_ops->reset_wp(td, f, offset, length);
	else
		ret = blkzoned_reset_wp(td, f, offset, length);

	if (ret < 0) {
		td_verror(td, errno, "resetting wp failed");
		log_err("%s: resetting wp for %llu sectors at sector %llu failed (%d).\n",
			f->file_name, (unsigned long long)length >> 9,
			(unsigned long long)offset >> 9, errno);
	}

	return ret;
}
/**
 * zbd_zone_idx - convert an offset into a zone number
 * @offset: offset in bytes. If this offset is in the first zone_size bytes
 *	    past the disk size, then the index of the sentinel is returned.
 */
static uint32_t zbd_zone_idx(const struct fio_file *f, uint64_t offset)
{
	uint32_t zone_idx;

	if (f->zbd_info->zone_size_log2 > 0)
		zone_idx = offset >> f->zbd_info->zone_size_log2;
	else
		zone_idx = offset / f->zbd_info->zone_size;

	return min(zone_idx, f->zbd_info->nr_zones);
}
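
/*
 * Minimal illustration (hypothetical helper, not used by fio): for a
 * power-of-2 zone size such as 256 MiB, zone_size_log2 is 28 and the shift
 * fast path in zbd_zone_idx() agrees with the generic division.
 */
static inline __attribute__((unused)) uint64_t zbd_zone_idx_example(void)
{
	const uint64_t zone_size = 256ULL << 20;	/* 256 MiB == 1 << 28 */
	const uint64_t offset = (1ULL << 30) + 4096;	/* 1 GiB + 4 KiB */

	/* Both expressions yield zone index 4. */
	assert((offset >> 28) == offset / zone_size);
	return offset >> 28;
}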
/**
 * zbd_zone_end - Return zone end location
 * @z: zone info pointer.
 */
static inline uint64_t zbd_zone_end(const struct fio_zone_info *z)
{
	return (z+1)->start;
}

/**
 * zbd_zone_capacity_end - Return zone capacity limit end location
 * @z: zone info pointer.
 */
static inline uint64_t zbd_zone_capacity_end(const struct fio_zone_info *z)
{
	return z->start + z->capacity;
}
/**
 * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
 * @z: zone info pointer.
 * @required: minimum number of bytes that must remain in a zone.
 *
 * The caller must hold z->mutex.
 */
static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
			  uint64_t required)
{
	assert((required & 511) == 0);

	return z->has_wp && required > 0 &&
		z->wp + required > zbd_zone_capacity_end(z);
}
static void zone_lock(struct thread_data *td, struct fio_file *f, struct fio_zone_info *z)
{
	struct zoned_block_device_info *zbd = f->zbd_info;
	uint32_t nz = z - zbd->zone_info;

	/* A thread should never lock zones outside its working area. */
	assert(f->min_zone <= nz && nz < f->max_zone);

	/*
	 * Lock the io_u target zone. The zone will be unlocked if the io_u
	 * offset is changed or when the io_u completes and zbd_put_io() is
	 * executed. To avoid multiple jobs doing asynchronous I/Os from
	 * deadlocking each other while waiting for zone locks when building
	 * an io_u batch, first only trylock the zone. If the zone is already
	 * locked by another job, process the currently queued I/Os so that
	 * I/O progress is made and zones are unlocked.
	 */
	if (pthread_mutex_trylock(&z->mutex) != 0) {
		if (!td_ioengine_flagged(td, FIO_SYNCIO))
			io_u_quiesce(td);
		pthread_mutex_lock(&z->mutex);
	}
}
static inline void zone_unlock(struct fio_zone_info *z)
{
	int ret;

	ret = pthread_mutex_unlock(&z->mutex);
	assert(!ret);
}
static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
{
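	/*
	 * Note the unsigned arithmetic: when offset lies below
	 * f->file_offset, the subtraction wraps to a huge value, so this
	 * single comparison also rejects offsets before the start of the
	 * I/O range.
	 */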
	return (uint64_t)(offset - f->file_offset) < f->io_size;
}
static inline struct fio_zone_info *get_zone(const struct fio_file *f,
					     unsigned int zone_nr)
{
	return &f->zbd_info->zone_info[zone_nr];
}
/* Verify whether direct I/O is used for all host-managed zoned drives. */
static bool zbd_using_direct_io(void)
{
	struct thread_data *td;

	for_each_td(td, i) {
		if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
			continue;
		for_each_file(td, f, j) {
			if (f->zbd_info &&
			    f->zbd_info->model == ZBD_HOST_MANAGED)
				return false;
		}
	}

	return true;
}
/* Whether or not the I/O range for f includes one or more sequential zones */
static bool zbd_is_seq_job(struct fio_file *f)
{
	uint32_t zone_idx, zone_idx_b, zone_idx_e;

	zone_idx_b = zbd_zone_idx(f, f->file_offset);
	zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size - 1);
	for (zone_idx = zone_idx_b; zone_idx <= zone_idx_e; zone_idx++)
		if (get_zone(f, zone_idx)->has_wp)
			return true;

	return false;
}
/*
 * Verify whether offset and size parameters are aligned with zone boundaries.
 */
static bool zbd_verify_sizes(void)
{
	const struct fio_zone_info *z;
	struct thread_data *td;
	struct fio_file *f;
	uint64_t new_offset, new_end;
	uint32_t zone_idx;
	int i, j;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			if (f->file_offset >= f->real_file_size)
				continue;
			if (!zbd_is_seq_job(f))
				continue;

			if (!td->o.zone_size) {
				td->o.zone_size = f->zbd_info->zone_size;
				if (!td->o.zone_size) {
					log_err("%s: invalid 0 zone size\n",
						f->file_name);
					return false;
				}
			} else if (td->o.zone_size != f->zbd_info->zone_size) {
				log_err("%s: job parameter zonesize %llu does not match disk zone size %llu.\n",
					f->file_name, (unsigned long long) td->o.zone_size,
					(unsigned long long) f->zbd_info->zone_size);
				return false;
			}

			if (td->o.zone_skip &&
			    (td->o.zone_skip < td->o.zone_size ||
			     td->o.zone_skip % td->o.zone_size)) {
				log_err("%s: zoneskip %llu is not a multiple of the device zone size %llu.\n",
					f->file_name, (unsigned long long) td->o.zone_skip,
					(unsigned long long) td->o.zone_size);
				return false;
			}

			zone_idx = zbd_zone_idx(f, f->file_offset);
			z = get_zone(f, zone_idx);
			if ((f->file_offset != z->start) &&
			    (td->o.td_ddir != TD_DDIR_READ)) {
				new_offset = zbd_zone_end(z);
				if (new_offset >= f->file_offset + f->io_size) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded up offset from %llu to %llu\n",
					 f->file_name, (unsigned long long) f->file_offset,
					 (unsigned long long) new_offset);
				f->io_size -= (new_offset - f->file_offset);
				f->file_offset = new_offset;
			}

			zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
			z = get_zone(f, zone_idx);
			new_end = z->start;
			if ((td->o.td_ddir != TD_DDIR_READ) &&
			    (f->file_offset + f->io_size != new_end)) {
				if (new_end <= f->file_offset) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded down io_size from %llu to %llu\n",
					 f->file_name, (unsigned long long) f->io_size,
					 (unsigned long long) new_end - f->file_offset);
				f->io_size = new_end - f->file_offset;
			}

			f->min_zone = zbd_zone_idx(f, f->file_offset);
			f->max_zone = zbd_zone_idx(f, f->file_offset + f->io_size);
			assert(f->min_zone < f->max_zone);
		}
	}

	return true;
}
static bool zbd_verify_bs(void)
{
	struct thread_data *td;
	struct fio_file *f;
	uint32_t zone_size;
	int i, j, k;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			zone_size = f->zbd_info->zone_size;
			for (k = 0; k < FIO_ARRAY_SIZE(td->o.bs); k++) {
				if (td->o.verify != VERIFY_NONE &&
				    zone_size % td->o.bs[k] != 0) {
					log_info("%s: block size %llu is not a divisor of the zone size %d\n",
						 f->file_name, td->o.bs[k],
						 zone_size);
					return false;
				}
			}
		}
	}

	return true;
}
static int ilog2(uint64_t i)
/*
 * Initialize f->zbd_info for devices that are not zoned block devices. This
 * makes it possible to run a ZBD workload against a non-ZBD device.
 */
static int init_zone_info(struct thread_data *td, struct fio_file *f)
{
	uint32_t nr_zones;
	struct fio_zone_info *p;
	uint64_t zone_size = td->o.zone_size;
	uint64_t zone_capacity = td->o.zone_capacity;
	struct zoned_block_device_info *zbd_info = NULL;
	int i;

	if (zone_size == 0) {
		log_err("%s: Specifying the zone size is mandatory for regular block devices with --zonemode=zbd\n\n",
			f->file_name);
		return 1;
	}

	if (zone_size < 512) {
		log_err("%s: zone size must be at least 512 bytes for --zonemode=zbd\n\n",
			f->file_name);
		return 1;
	}

	if (zone_capacity == 0)
		zone_capacity = zone_size;

	if (zone_capacity > zone_size) {
		log_err("%s: job parameter zonecapacity %llu is larger than zone size %llu\n",
			f->file_name, (unsigned long long) td->o.zone_capacity,
			(unsigned long long) td->o.zone_size);
		return 1;
	}

	nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	if (!zbd_info)
		return -ENOMEM;

	mutex_init_pshared(&zbd_info->mutex);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (i = 0; i < nr_zones; i++, p++) {
		mutex_init_pshared_with_type(&p->mutex,
					     PTHREAD_MUTEX_RECURSIVE);
		p->start = i * zone_size;
		p->wp = p->start;
		p->type = ZBD_ZONE_TYPE_SWR;
		p->cond = ZBD_ZONE_COND_EMPTY;
		p->capacity = zone_capacity;
	}
	/* a sentinel */
	p->start = nr_zones * zone_size;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) : 0;
	f->zbd_info->nr_zones = nr_zones;
	return 0;
}
/*
 * Maximum number of zones to report in one operation.
 */
#define ZBD_REPORT_MAX_ZONES	8192U
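
/*
 * Reporting in batches of 8192 zones bounds the size of the temporary
 * zones[] buffer that parse_zone_info() allocates below, while still
 * covering large devices with only a few zbd_report_zones() calls.
 */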
/*
 * Parse the device zone report and store it in f->zbd_info. Must be called
 * only for devices that are zoned, namely those with a model != ZBD_NONE.
 */
static int parse_zone_info(struct thread_data *td, struct fio_file *f)
{
	int nr_zones, nrz;
	struct zbd_zone *zones, *z;
	struct fio_zone_info *p;
	uint64_t zone_size, offset;
	struct zoned_block_device_info *zbd_info = NULL;
	int i, j, ret = -ENOMEM;

	zones = calloc(ZBD_REPORT_MAX_ZONES, sizeof(struct zbd_zone));
	if (!zones)
		goto out;

	nrz = zbd_report_zones(td, f, 0, zones, ZBD_REPORT_MAX_ZONES);
	if (nrz < 0) {
		ret = nrz;
		log_info("fio: report zones (offset 0) failed for %s (%d).\n",
			 f->file_name, -ret);
		goto out;
	}

	zone_size = zones[0].len;
	nr_zones = (f->real_file_size + zone_size - 1) / zone_size;

	if (td->o.zone_size == 0) {
		td->o.zone_size = zone_size;
	} else if (td->o.zone_size != zone_size) {
		log_err("fio: %s job parameter zonesize %llu does not match disk zone size %llu.\n",
			f->file_name, (unsigned long long) td->o.zone_size,
			(unsigned long long) zone_size);
		ret = -EINVAL;
		goto out;
	}

	dprint(FD_ZBD, "Device %s has %d zones of size %llu KB\n", f->file_name,
	       nr_zones, (unsigned long long) zone_size / 1024);

	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	if (!zbd_info)
		goto out;
	mutex_init_pshared(&zbd_info->mutex);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (offset = 0, j = 0; j < nr_zones;) {
		z = &zones[0];
		for (i = 0; i < nrz; i++, j++, z++, p++) {
			mutex_init_pshared_with_type(&p->mutex,
						     PTHREAD_MUTEX_RECURSIVE);
			p->start = z->start;
			p->capacity = z->capacity;
			switch (z->cond) {
			case ZBD_ZONE_COND_NOT_WP:
			case ZBD_ZONE_COND_FULL:
				p->wp = p->start + p->capacity;
				break;
			default:
				assert(z->start <= z->wp);
				assert(z->wp <= z->start + zone_size);
				p->wp = z->wp;
				break;
			}

			switch (z->type) {
			case ZBD_ZONE_TYPE_SWR:
				p->has_wp = 1;
				break;

			if (j > 0 && p->start != p[-1].start + zone_size) {
				log_info("%s: invalid zone data\n",
					 f->file_name);
				ret = -EINVAL;
				goto out;
			}
		}
		z--;
		offset = z->start + z->len;
		if (j >= nr_zones)
			break;
		nrz = zbd_report_zones(td, f, offset,
				       zones, ZBD_REPORT_MAX_ZONES);
		if (nrz < 0) {
			ret = nrz;
			log_info("fio: report zones (offset %llu) failed for %s (%d).\n",
				 (unsigned long long)offset,
				 f->file_name, -ret);
			goto out;
		}
	}

	/* a sentinel */
	zbd_info->zone_info[nr_zones].start = offset;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) : 0;
	f->zbd_info->nr_zones = nr_zones;
/*
 * Allocate zone information and store it into f->zbd_info if zonemode=zbd.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
{
	enum zbd_zoned_model zbd_model;
	int ret;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);

	ret = zbd_get_zoned_model(td, f, &zbd_model);

	switch (zbd_model) {
	case ZBD_HOST_AWARE:
	case ZBD_HOST_MANAGED:
		ret = parse_zone_info(td, f);
		break;
	case ZBD_NONE:
		ret = init_zone_info(td, f);
		break;
	default:
		td_verror(td, EINVAL, "Unsupported zoned model");
		log_err("Unsupported zoned model\n");
		return -EINVAL;
	}

	if (ret == 0) {
		f->zbd_info->model = zbd_model;
		f->zbd_info->max_open_zones = td->o.max_open_zones;
	}
	return ret;
}
void zbd_free_zone_info(struct fio_file *f)
{
	uint32_t refcount;

	pthread_mutex_lock(&f->zbd_info->mutex);
	refcount = --f->zbd_info->refcount;
	pthread_mutex_unlock(&f->zbd_info->mutex);

	assert((int32_t)refcount >= 0);
	if (refcount == 0)
		sfree(f->zbd_info);
	f->zbd_info = NULL;
}
/*
 * Initialize f->zbd_info.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * Note: this function can only work correctly if it is called before the first
 * fio fork() call.
 */
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
	struct thread_data *td2;
	struct fio_file *f2;
	int i, j, ret;

	for_each_td(td2, i) {
		for_each_file(td2, f2, j) {
			if (td2 == td && f2 == file)
				continue;
			if (!f2->zbd_info ||
			    strcmp(f2->file_name, file->file_name) != 0)
				continue;
			file->zbd_info = f2->zbd_info;
			file->zbd_info->refcount++;
			return 0;
		}
	}

	ret = zbd_create_zone_info(td, file);
	if (ret < 0)
		td_verror(td, -ret, "zbd_create_zone_info() failed");
	return ret;
}
static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
			  uint32_t zone_idx);
static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
			  struct fio_zone_info *z);
int zbd_setup_files(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	for_each_file(td, f, i) {
		if (zbd_init_zone_info(td, f))
			return 1;
	}

	if (!zbd_using_direct_io()) {
		log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
		return 1;
	}

	if (!zbd_verify_sizes())
		return 1;

	if (!zbd_verify_bs())
		return 1;

	for_each_file(td, f, i) {
		struct zoned_block_device_info *zbd = f->zbd_info;
		struct fio_zone_info *z;
		int zi;

		if (!zbd)
			continue;

		zbd->max_open_zones = zbd->max_open_zones ?: ZBD_MAX_OPEN_ZONES;

		if (td->o.max_open_zones > 0 &&
		    zbd->max_open_zones != td->o.max_open_zones) {
			log_err("Different 'max_open_zones' values\n");
			return 1;
		}
		if (zbd->max_open_zones > ZBD_MAX_OPEN_ZONES) {
			log_err("'max_open_zones' value is limited by %u\n", ZBD_MAX_OPEN_ZONES);
			return 1;
		}

		for (zi = f->min_zone; zi < f->max_zone; zi++) {
			z = &zbd->zone_info[zi];
			if (z->cond != ZBD_ZONE_COND_IMP_OPEN &&
			    z->cond != ZBD_ZONE_COND_EXP_OPEN)
				continue;
			if (zbd_open_zone(td, f, zi))
				continue;
			/*
			 * If the number of open zones exceeds specified limits,
			 * reset all extra open zones.
			 */
			if (zbd_reset_zone(td, f, z) < 0) {
				log_err("Failed to reset zone %d\n", zi);
				return 1;
			}
		}
	}

	return 0;
}
static inline unsigned int zbd_zone_nr(const struct fio_file *f,
				       struct fio_zone_info *zone)
{
	return zone - f->zbd_info->zone_info;
}
/**
 * zbd_reset_zone - reset the write pointer of a single zone
 * @td: FIO thread data.
 * @f: FIO file associated with the disk for which to reset a write pointer.
 * @z: Zone to reset.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * The caller must hold z->mutex.
 */
static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
			  struct fio_zone_info *z)
{
	uint64_t offset = z->start;
	uint64_t length = (z+1)->start - offset;
	int ret = 0;

	if (z->wp == z->start)
		return 0;

	assert(is_valid_offset(f, offset + length - 1));

	dprint(FD_ZBD, "%s: resetting wp of zone %u.\n", f->file_name,
	       zbd_zone_nr(f, z));
	switch (f->zbd_info->model) {
	case ZBD_HOST_AWARE:
	case ZBD_HOST_MANAGED:
		ret = zbd_reset_wp(td, f, offset, length);
		if (ret < 0)
			return ret;
		break;
	default:
		break;
	}

	pthread_mutex_lock(&f->zbd_info->mutex);
	f->zbd_info->sectors_with_data -= z->wp - z->start;
	pthread_mutex_unlock(&f->zbd_info->mutex);

	z->wp = z->start;

	td->ts.nr_zone_resets++;

	return ret;
}
/* The caller must hold f->zbd_info->mutex */
static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
			   unsigned int zone_idx)
{
	uint32_t open_zone_idx = 0;

	for (; open_zone_idx < f->zbd_info->num_open_zones; open_zone_idx++) {
		if (f->zbd_info->open_zones[open_zone_idx] == zone_idx)
			break;
	}
	if (open_zone_idx == f->zbd_info->num_open_zones) {
		dprint(FD_ZBD, "%s: zone %d is not open\n",
		       f->file_name, zone_idx);
		return;
	}

	dprint(FD_ZBD, "%s: closing zone %d\n", f->file_name, zone_idx);
	memmove(f->zbd_info->open_zones + open_zone_idx,
		f->zbd_info->open_zones + open_zone_idx + 1,
		(ZBD_MAX_OPEN_ZONES - (open_zone_idx + 1)) *
		sizeof(f->zbd_info->open_zones[0]));
	f->zbd_info->num_open_zones--;
	td->num_open_zones--;
	get_zone(f, zone_idx)->open = 0;
}
/*
 * Reset a range of zones. Returns 0 upon success and 1 upon failure.
 * @td: fio thread data.
 * @f: fio file for which to reset zones
 * @zb: first zone to reset.
 * @ze: first zone not to reset.
 * @all_zones: whether to reset all zones or only those zones for which the
 *	write pointer is not a multiple of td->o.min_bs[DDIR_WRITE].
 */
static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
			   struct fio_zone_info *const zb,
			   struct fio_zone_info *const ze, bool all_zones)
{
	struct fio_zone_info *z;
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
	bool reset_wp;
	int res = 0;

	dprint(FD_ZBD, "%s: examining zones %u .. %u\n", f->file_name,
	       zbd_zone_nr(f, zb), zbd_zone_nr(f, ze));
	for (z = zb; z < ze; z++) {
		uint32_t nz = zbd_zone_nr(f, z);

		if (!z->has_wp)
			continue;
		zone_lock(td, f, z);
		if (all_zones) {
			pthread_mutex_lock(&f->zbd_info->mutex);
			zbd_close_zone(td, f, nz);
			pthread_mutex_unlock(&f->zbd_info->mutex);

			reset_wp = z->wp != z->start;
		} else {
			reset_wp = z->wp % min_bs != 0;
		}
		if (reset_wp) {
			dprint(FD_ZBD, "%s: resetting zone %u\n",
			       f->file_name, zbd_zone_nr(f, z));
			if (zbd_reset_zone(td, f, z) < 0)
				res = 1;
		}
		zone_unlock(z);
	}

	return res;
}
/*
 * Reset zbd_info.write_cnt, the counter that counts down towards the next
 * zone reset.
 */
static void _zbd_reset_write_cnt(const struct thread_data *td,
				 const struct fio_file *f)
{
	assert(0 <= td->o.zrf.u.f && td->o.zrf.u.f <= 1);

	f->zbd_info->write_cnt = td->o.zrf.u.f ?
		min(1.0 / td->o.zrf.u.f, 0.0 + UINT_MAX) : UINT_MAX;
}
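
/*
 * Worked example (hypothetical option value): zone_reset_frequency=0.01
 * gives write_cnt = min(1.0 / 0.01, UINT_MAX) = 100, so a zone reset is
 * considered once every 100 write requests; a frequency of 0 sets write_cnt
 * to UINT_MAX, which effectively disables the countdown.
 */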
static void zbd_reset_write_cnt(const struct thread_data *td,
				const struct fio_file *f)
{
	pthread_mutex_lock(&f->zbd_info->mutex);
	_zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);
}

static bool zbd_dec_and_reset_write_cnt(const struct thread_data *td,
					const struct fio_file *f)
{
	uint32_t write_cnt = 0;

	pthread_mutex_lock(&f->zbd_info->mutex);
	assert(f->zbd_info->write_cnt);
	if (f->zbd_info->write_cnt)
		write_cnt = --f->zbd_info->write_cnt;
	if (write_cnt == 0)
		_zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);

	return write_cnt == 0;
}
enum swd_action {
	CHECK_SWD,
	SET_SWD,
};

/* Calculate the number of sectors with data (swd) and perform action 'a' */
static uint64_t zbd_process_swd(const struct fio_file *f, enum swd_action a)
{
	struct fio_zone_info *zb, *ze, *z;
	uint64_t swd = 0;

	zb = get_zone(f, f->min_zone);
	ze = get_zone(f, f->max_zone);
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		swd += z->wp - z->start;
	}
	pthread_mutex_lock(&f->zbd_info->mutex);
	switch (a) {
	case CHECK_SWD:
		assert(f->zbd_info->sectors_with_data == swd);
		break;
	case SET_SWD:
		f->zbd_info->sectors_with_data = swd;
		break;
	}
	pthread_mutex_unlock(&f->zbd_info->mutex);
	for (z = zb; z < ze; z++)
		zone_unlock(z);

	return swd;
}

/*
 * The swd check is useful for debugging but takes too much time to leave
 * it enabled all the time. Hence it is disabled by default.
 */
static const bool enable_check_swd = false;
/* Check whether the value of zbd_info.sectors_with_data is correct. */
static void zbd_check_swd(const struct fio_file *f)
{
	if (!enable_check_swd)
		return;

	zbd_process_swd(f, CHECK_SWD);
}

static void zbd_init_swd(struct fio_file *f)
{
	uint64_t swd;

	if (!enable_check_swd)
		return;

	swd = zbd_process_swd(f, SET_SWD);
	dprint(FD_ZBD, "%s(%s): swd = %" PRIu64 "\n", __func__, f->file_name,
	       swd);
}
void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
	struct fio_zone_info *zb, *ze;

	if (!f->zbd_info || !td_write(td))
		return;

	zb = get_zone(f, f->min_zone);
	ze = get_zone(f, f->max_zone);
	zbd_init_swd(f);
	/*
	 * If data verification is enabled, reset the affected zones before
	 * writing any data so that a zone reset does not have to be issued
	 * while writing data, which would cause data loss.
	 */
	zbd_reset_zones(td, f, zb, ze, td->o.verify != VERIFY_NONE &&
			td->runstate != TD_VERIFYING);
	zbd_reset_write_cnt(td, f);
}
/* The caller must hold f->zbd_info->mutex. */
static bool is_zone_open(const struct thread_data *td, const struct fio_file *f,
			 unsigned int zone_idx)
{
	struct zoned_block_device_info *zbdi = f->zbd_info;
	int i;

	assert(td->o.job_max_open_zones == 0 || td->num_open_zones <= td->o.job_max_open_zones);
	assert(td->o.job_max_open_zones <= zbdi->max_open_zones);
	assert(zbdi->num_open_zones <= zbdi->max_open_zones);

	for (i = 0; i < zbdi->num_open_zones; i++)
		if (zbdi->open_zones[i] == zone_idx)
			return true;

	return false;
}
/*
 * Open a ZBD zone if it was not yet open. Returns true if either the zone was
 * already open or if opening a new zone is allowed. Returns false if the zone
 * was not yet open and opening a new zone would cause the zone limit to be
 * exceeded.
 */
static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
			  uint32_t zone_idx)
{
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
	struct fio_zone_info *z = get_zone(f, zone_idx);
	bool res = true;

	if (z->cond == ZBD_ZONE_COND_OFFLINE)
		return false;

	/*
	 * Skip full zones with data verification enabled because resetting a
	 * zone causes data loss and hence causes verification to fail.
	 */
	if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
		return false;

	pthread_mutex_lock(&f->zbd_info->mutex);
	if (is_zone_open(td, f, zone_idx)) {
		/*
		 * If the zone is already open and about to be filled by
		 * in-flight writes, handle it as a full zone instead of an
		 * open zone.
		 */
		if (z->wp >= zbd_zone_capacity_end(z))
			res = false;
		goto out;
	}

	res = false;
	/* Zero means no limit */
	if (td->o.job_max_open_zones > 0 &&
	    td->num_open_zones >= td->o.job_max_open_zones)
		goto out;
	if (f->zbd_info->num_open_zones >= f->zbd_info->max_open_zones)
		goto out;
	dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
	f->zbd_info->open_zones[f->zbd_info->num_open_zones++] = zone_idx;
	td->num_open_zones++;
	z->open = 1;
	res = true;

out:
	pthread_mutex_unlock(&f->zbd_info->mutex);
	return res;
}
/* Anything goes as long as it is not a constant. */
static uint32_t pick_random_zone_idx(const struct fio_file *f,
				     const struct io_u *io_u)
{
	return io_u->offset * f->zbd_info->num_open_zones / f->real_file_size;
}
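
/*
 * Example: with 8 open zones, an io_u->offset halfway into the file picks
 * index (size / 2) * 8 / size = 4. Offsets are assumed to be roughly
 * uniformly distributed, so this spreads the selection over open_zones[]
 * without consuming extra random numbers.
 */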
/*
 * Modify the offset of an I/O unit that does not refer to an open zone such
 * that it refers to an open zone. Close an open zone and open a new zone if
 * necessary. This algorithm can only work correctly if all write pointers are
 * a multiple of the fio block size. The caller must neither hold z->mutex
 * nor f->zbd_info->mutex. Returns with z->mutex held upon success.
 */
static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
						      struct io_u *io_u)
{
	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
	struct fio_file *f = io_u->file;
	struct fio_zone_info *z;
	unsigned int open_zone_idx = -1;
	uint32_t zone_idx, new_zone_idx;
	int i;
	bool wait_zone_close;

	assert(is_valid_offset(f, io_u->offset));

	if (td->o.max_open_zones || td->o.job_max_open_zones) {
		/*
		 * This statement accesses f->zbd_info->open_zones[] on purpose
		 * without locking.
		 */
		zone_idx = f->zbd_info->open_zones[pick_random_zone_idx(f, io_u)];
	} else {
		zone_idx = zbd_zone_idx(f, io_u->offset);
	}
	if (zone_idx < f->min_zone)
		zone_idx = f->min_zone;
	else if (zone_idx >= f->max_zone)
		zone_idx = f->max_zone - 1;
	dprint(FD_ZBD, "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
	       __func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);

	/*
	 * Since z->mutex is the outer lock and f->zbd_info->mutex the inner
	 * lock it can happen that the state of the zone with index zone_idx
	 * has changed after 'z' has been assigned and before f->zbd_info->mutex
	 * has been obtained. Hence the loop.
	 */
	for (;;) {
		uint32_t tmp_idx;

		z = get_zone(f, zone_idx);

		zone_lock(td, f, z);
		pthread_mutex_lock(&f->zbd_info->mutex);
		if (z->cond != ZBD_ZONE_COND_OFFLINE &&
		    td->o.max_open_zones == 0 && td->o.job_max_open_zones == 0)
			goto examine_zone;
		if (f->zbd_info->num_open_zones == 0) {
			dprint(FD_ZBD, "%s(%s): no zones are open\n",
			       __func__, f->file_name);
			goto open_other_zone;
		}

		/*
		 * The list of open zones is per-device and shared across all
		 * threads. Start with a quasi-random candidate zone and
		 * ignore zones that do not belong to this thread's
		 * offset/size area.
		 */
		open_zone_idx = pick_random_zone_idx(f, io_u);
		assert(open_zone_idx < f->zbd_info->num_open_zones);
		tmp_idx = open_zone_idx;
		for (i = 0; i < f->zbd_info->num_open_zones; i++) {
			uint32_t tmpz;

			if (tmp_idx >= f->zbd_info->num_open_zones)
				tmp_idx = 0;
			tmpz = f->zbd_info->open_zones[tmp_idx];
			if (f->min_zone <= tmpz && tmpz < f->max_zone) {
				open_zone_idx = tmp_idx;
				goto found_candidate_zone;
			}

			tmp_idx++;
		}

		dprint(FD_ZBD, "%s(%s): no candidate zone\n",
		       __func__, f->file_name);
		pthread_mutex_unlock(&f->zbd_info->mutex);
		zone_unlock(z);
		return NULL;

found_candidate_zone:
		new_zone_idx = f->zbd_info->open_zones[open_zone_idx];
		if (new_zone_idx == zone_idx)
			break;
		zone_idx = new_zone_idx;
		pthread_mutex_unlock(&f->zbd_info->mutex);
		zone_unlock(z);
	}
	/* Both z->mutex and f->zbd_info->mutex are held. */

examine_zone:
	if (z->wp + min_bs <= zbd_zone_capacity_end(z)) {
		pthread_mutex_unlock(&f->zbd_info->mutex);
		goto out;
	}

open_other_zone:
	/* Check whether the number of open zones has reached one of the limits. */
	wait_zone_close =
		f->zbd_info->num_open_zones == f->max_zone - f->min_zone ||
		(td->o.max_open_zones &&
		 f->zbd_info->num_open_zones == td->o.max_open_zones) ||
		(td->o.job_max_open_zones &&
		 td->num_open_zones == td->o.job_max_open_zones);

	pthread_mutex_unlock(&f->zbd_info->mutex);

	/* Only z->mutex is held. */

	/*
	 * When the number of open zones has reached one of the limits, wait
	 * for a zone to close before opening a new zone.
	 */
	if (wait_zone_close) {
		dprint(FD_ZBD, "%s(%s): quiesce to allow open zones to close\n",
		       __func__, f->file_name);
		io_u_quiesce(td);
	}
	/* Zone 'z' is full, so try to open a new zone. */
	for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
		zone_idx++;
		zone_unlock(z);
		z++;
		if (!is_valid_offset(f, z->start)) {
			/* Wrap-around. */
			zone_idx = f->min_zone;
			z = get_zone(f, zone_idx);
		}
		assert(is_valid_offset(f, z->start));
		zone_lock(td, f, z);
		if (z->open)
			continue;
		if (zbd_open_zone(td, f, zone_idx))
			goto out;
	}

	/* Only z->mutex is held. */

	/* Check whether the write fits in any of the already opened zones. */
	pthread_mutex_lock(&f->zbd_info->mutex);
	for (i = 0; i < f->zbd_info->num_open_zones; i++) {
		zone_idx = f->zbd_info->open_zones[i];
		if (zone_idx < f->min_zone || zone_idx >= f->max_zone)
			continue;
		pthread_mutex_unlock(&f->zbd_info->mutex);
		zone_unlock(z);

		z = get_zone(f, zone_idx);

		zone_lock(td, f, z);
		if (z->wp + min_bs <= zbd_zone_capacity_end(z))
			goto out;
		pthread_mutex_lock(&f->zbd_info->mutex);
	}
	pthread_mutex_unlock(&f->zbd_info->mutex);
	zone_unlock(z);
	dprint(FD_ZBD, "%s(%s): did not open another zone\n", __func__,
	       f->file_name);
	return NULL;

out:
	dprint(FD_ZBD, "%s(%s): returning zone %d\n", __func__, f->file_name,
	       zone_idx);
	io_u->offset = z->start;
	assert(z->cond != ZBD_ZONE_COND_OFFLINE);
	return z;
}
/* The caller must hold z->mutex. */
static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
						    struct io_u *io_u,
						    struct fio_zone_info *z)
{
	const struct fio_file *f = io_u->file;
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];

	if (!zbd_open_zone(td, f, zbd_zone_nr(f, z))) {
		zone_unlock(z);
		z = zbd_convert_to_open_zone(td, io_u);
		assert(z);
	}

	if (z->verify_block * min_bs >= z->capacity)
		log_err("%s: %d * %d >= %llu\n", f->file_name, z->verify_block,
			min_bs, (unsigned long long)z->capacity);
	io_u->offset = z->start + z->verify_block++ * min_bs;
	return z;
}
/*
 * Find another zone for which @io_u fits below the write pointer. Start
 * searching in zones @zb + 1 .. @zl and continue searching in zones
 * @zb - 1 .. @zf.
 *
 * Either returns NULL or returns a zone pointer and holds the mutex for that
 * zone.
 */
static struct fio_zone_info *
zbd_find_zone(struct thread_data *td, struct io_u *io_u,
	      struct fio_zone_info *zb, struct fio_zone_info *zl)
{
	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
	struct fio_file *f = io_u->file;
	struct fio_zone_info *z1, *z2;
	const struct fio_zone_info *const zf = get_zone(f, f->min_zone);

	/*
	 * Skip to the next non-empty zone in case of sequential I/O and to
	 * the nearest non-empty zone in case of random I/O.
	 */
	for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
		if (z1 < zl && z1->cond != ZBD_ZONE_COND_OFFLINE) {
			zone_lock(td, f, z1);
			if (z1->start + min_bs <= z1->wp)
				return z1;
			zone_unlock(z1);
		} else if (!td_random(td)) {
			break;
		}
		if (td_random(td) && z2 >= zf &&
		    z2->cond != ZBD_ZONE_COND_OFFLINE) {
			zone_lock(td, f, z2);
			if (z2->start + min_bs <= z2->wp)
				return z2;
			zone_unlock(z2);
		}
	}
	dprint(FD_ZBD, "%s: adjusting random read offset failed\n",
	       f->file_name);
	return NULL;
}
/**
 * zbd_end_zone_io - update zone status at command completion
 * @io_u: I/O unit
 * @z: zone info pointer
 *
 * If the write command made the zone full, close it.
 *
 * The caller must hold z->mutex.
 */
static void zbd_end_zone_io(struct thread_data *td, const struct io_u *io_u,
			    struct fio_zone_info *z)
{
	const struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_WRITE &&
	    io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
		pthread_mutex_lock(&f->zbd_info->mutex);
		zbd_close_zone(td, f, zbd_zone_nr(f, z));
		pthread_mutex_unlock(&f->zbd_info->mutex);
	}
}
/**
 * zbd_queue_io - update the write pointer of a sequential zone
 * @io_u: I/O unit
 * @success: Whether or not the I/O unit has been queued successfully
 * @q: queueing status (busy, completed or queued).
 *
 * For write and trim operations, update the write pointer of the I/O unit
 * target zone.
 */
static void zbd_queue_io(struct thread_data *td, struct io_u *io_u, int q,
			 bool success)
{
	const struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbd_info = f->zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx;
	uint64_t zone_end;

	if (!zbd_info)
		return;

	zone_idx = zbd_zone_idx(f, io_u->offset);
	assert(zone_idx < zbd_info->nr_zones);
	z = get_zone(f, zone_idx);

	if (!z->has_wp)
		return;

	if (!success)
		goto unlock;

	dprint(FD_ZBD,
	       "%s: queued I/O (%lld, %llu) for zone %u\n",
	       f->file_name, io_u->offset, io_u->buflen, zone_idx);

	switch (io_u->ddir) {
	case DDIR_WRITE:
		zone_end = min((uint64_t)(io_u->offset + io_u->buflen),
			       zbd_zone_capacity_end(z));
		pthread_mutex_lock(&zbd_info->mutex);
		/*
		 * z->wp > zone_end means that one or more I/O errors
		 * have occurred.
		 */
		if (z->wp <= zone_end)
			zbd_info->sectors_with_data += zone_end - z->wp;
		pthread_mutex_unlock(&zbd_info->mutex);
		z->wp = zone_end;
		break;
	case DDIR_TRIM:
		assert(z->wp == z->start);
		break;
	default:
		break;
	}

	if (q == FIO_Q_COMPLETED && !io_u->error)
		zbd_end_zone_io(td, io_u, z);

unlock:
	if (!success || q != FIO_Q_QUEUED) {
		/* BUSY or COMPLETED: unlock the zone */
		zone_unlock(z);
		io_u->zbd_put_io = NULL;
	}
}
/**
 * zbd_put_io - Unlock an I/O unit target zone lock
 * @io_u: I/O unit
 */
static void zbd_put_io(struct thread_data *td, const struct io_u *io_u)
{
	const struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbd_info = f->zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx;

	if (!zbd_info)
		return;

	zone_idx = zbd_zone_idx(f, io_u->offset);
	assert(zone_idx < zbd_info->nr_zones);
	z = get_zone(f, zone_idx);

	if (!z->has_wp)
		return;

	dprint(FD_ZBD,
	       "%s: terminate I/O (%lld, %llu) for zone %u\n",
	       f->file_name, io_u->offset, io_u->buflen, zone_idx);

	zbd_end_zone_io(td, io_u, z);

	zone_unlock(z);
	zbd_check_swd(f);
}
/*
 * Windows and macOS do not define this.
 */
#ifndef EREMOTEIO
#define EREMOTEIO	121	/* Linux errno value */
#endif

bool zbd_unaligned_write(int error_code)
{
	switch (error_code) {
/**
 * setup_zbd_zone_mode - handle zoneskip as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * For sequential workloads, change the file offset to skip zoneskip bytes when
 * no more I/O can be performed in the current zone.
 * - For read workloads, zoneskip is applied when the I/O has reached the end
 *   of the zone or the zone write position (when td->o.read_beyond_wp is
 *   false).
 * - For write workloads, zoneskip is applied when the zone is full.
 * This applies only to read and write operations.
 */
void setup_zbd_zone_mode(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	struct fio_zone_info *z;
	uint32_t zone_idx;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);
	assert(td->o.zone_size);

	zone_idx = zbd_zone_idx(f, f->last_pos[ddir]);
	z = get_zone(f, zone_idx);

	/*
	 * When the zone capacity is smaller than the zone size and the I/O is
	 * a sequential write, skip to the zone end if the latest position is
	 * at the zone capacity limit.
	 */
	if (z->capacity < f->zbd_info->zone_size && !td_random(td) &&
	    ddir == DDIR_WRITE &&
	    f->last_pos[ddir] >= zbd_zone_capacity_end(z)) {
		dprint(FD_ZBD,
		       "%s: Jump from zone capacity limit to zone end:"
		       " (%llu -> %llu) for zone %u (%llu)\n",
		       f->file_name, (unsigned long long) f->last_pos[ddir],
		       (unsigned long long) zbd_zone_end(z), zone_idx,
		       (unsigned long long) z->capacity);
		td->io_skip_bytes += zbd_zone_end(z) - f->last_pos[ddir];
		f->last_pos[ddir] = zbd_zone_end(z);
	}

	/*
	 * zone_skip is valid only for sequential workloads.
	 */
	if (td_random(td) || !td->o.zone_skip)
		return;

	/*
	 * It is time to switch to a new zone if:
	 * - zone_bytes == zone_size bytes have already been accessed
	 * - The last position reached the end of the current zone.
	 * - For reads with td->o.read_beyond_wp == false, the last position
	 *   reached the zone write pointer.
	 */
	if (td->zone_bytes >= td->o.zone_size ||
	    f->last_pos[ddir] >= zbd_zone_end(z) ||
	    (ddir == DDIR_READ &&
	     (!td->o.read_beyond_wp) && f->last_pos[ddir] >= z->wp)) {
		/*
		 * Skip zones.
		 */
		td->zone_bytes = 0;
		f->file_offset += td->o.zone_size + td->o.zone_skip;

		/*
		 * Wrap around to the beginning if we exceed the file size.
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = get_start_offset(td, f);

		f->last_pos[ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}
}
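
/*
 * Example (hypothetical option values): with --zonesize=256M and
 * --zoneskip=256M, each zone switch advances file_offset by 512 MiB, so the
 * job touches every other zone and wraps back to its start offset once it
 * runs past the end of the file.
 */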
/**
 * zbd_adjust_ddir - Adjust an I/O direction for zonemode=zbd.
 *
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 * @ddir: I/O direction before adjustment.
 *
 * Return adjusted I/O direction.
 */
enum fio_ddir zbd_adjust_ddir(struct thread_data *td, struct io_u *io_u,
			      enum fio_ddir ddir)
{
	/*
	 * In case read direction is chosen for the first random I/O, fio with
	 * zonemode=zbd stops because no data can be read from zoned block
	 * devices with all empty zones. Overwrite the first I/O direction
	 * with write to ensure that there is data to read.
	 */
	if (ddir != DDIR_READ || !td_rw(td))
		return ddir;

	if (io_u->file->zbd_info->sectors_with_data ||
	    td->o.read_beyond_wp)
		return DDIR_READ;

	return DDIR_WRITE;
}
/**
 * zbd_adjust_block - adjust the offset and length as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * Locking strategy: returns with z->mutex locked if and only if z refers
 * to a sequential zone and if io_u_accept is returned. z is the zone that
 * corresponds to io_u->offset at the end of this function.
 */
enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	uint32_t zone_idx_b;
	struct fio_zone_info *zb, *zl, *orig_zb;
	uint32_t orig_len = io_u->buflen;
	uint32_t min_bs = td->o.min_bs[io_u->ddir];
	uint64_t new_len;
	int64_t range;

	if (!f->zbd_info)
		return io_u_accept;

	assert(is_valid_offset(f, io_u->offset));
	assert(io_u->buflen);
	zone_idx_b = zbd_zone_idx(f, io_u->offset);
	zb = get_zone(f, zone_idx_b);
	orig_zb = zb;

	/* Accept the I/O offset for conventional zones. */
	if (!zb->has_wp)
		return io_u_accept;

	/*
	 * Accept the I/O offset for reads if reading beyond the write pointer
	 * is enabled.
	 */
	if (zb->cond != ZBD_ZONE_COND_OFFLINE &&
	    io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
		return io_u_accept;

	zone_lock(td, f, zb);

	switch (io_u->ddir) {
	case DDIR_READ:
		if (td->runstate == TD_VERIFYING && td_write(td)) {
			zb = zbd_replay_write_order(td, io_u, zb);
			goto accept;
		}

		/*
		 * Check that there is enough written data in the zone to do an
		 * I/O of at least min_bs bytes. If there isn't, find a new
		 * zone for the I/O.
		 */
		range = zb->cond != ZBD_ZONE_COND_OFFLINE ?
			zb->wp - zb->start : 0;
		if (range < min_bs ||
		    ((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
			zone_unlock(zb);
			zl = get_zone(f, f->max_zone);
			zb = zbd_find_zone(td, io_u, zb, zl);
			if (!zb) {
				dprint(FD_ZBD,
				       "%s: zbd_find_zone(%lld, %llu) failed\n",
				       f->file_name, io_u->offset,
				       io_u->buflen);
				goto eof;
			}
			/*
			 * zbd_find_zone() returned a zone with a range of at
			 * least min_bs.
			 */
			range = zb->wp - zb->start;
			assert(range >= min_bs);

			if (!td_random(td))
				io_u->offset = zb->start;
		}
		/*
		 * Make sure the I/O is within the zone valid data range while
		 * maximizing the I/O size and preserving randomness.
		 */
		if (range <= io_u->buflen)
			io_u->offset = zb->start;
		else if (td_random(td))
			io_u->offset = zb->start +
				((io_u->offset - orig_zb->start) %
				 (range - io_u->buflen)) / min_bs * min_bs;
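		/*
		 * Worked example (hypothetical numbers): with 1 MiB of valid
		 * data in the zone (range), a 128 KiB buffer and a 4 KiB
		 * min_bs, the original intra-zone offset is folded into
		 * [0, 896 KiB) and rounded down to a 4 KiB boundary, so the
		 * read stays below the write pointer while remaining random.
		 */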
		/*
		 * Make sure the I/O does not cross over the zone wp position.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      (unsigned long long)(zb->wp - io_u->offset));
		new_len = new_len / min_bs * min_bs;
		if (new_len < io_u->buflen) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u into %llu\n",
			       orig_len, io_u->buflen);
		}
		assert(zb->start <= io_u->offset);
		assert(io_u->offset + io_u->buflen <= zb->wp);
		goto accept;
	case DDIR_WRITE:
		if (io_u->buflen > f->zbd_info->zone_size)
			goto eof;
		if (!zbd_open_zone(td, f, zone_idx_b)) {
			zone_unlock(zb);
			zb = zbd_convert_to_open_zone(td, io_u);
			if (!zb)
				goto eof;
			zone_idx_b = zbd_zone_nr(f, zb);
		}
		/* Check whether the zone reset threshold has been exceeded */
		if (td->o.zrf.u.f) {
			if (f->zbd_info->sectors_with_data >=
			    f->io_size * td->o.zrt.u.f &&
			    zbd_dec_and_reset_write_cnt(td, f)) {
				zb->reset_zone = 1;
			}
		}
		/* Reset the zone pointer if necessary */
		if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
			assert(td->o.verify == VERIFY_NONE);
			/*
			 * Since previous write requests may have been submitted
			 * asynchronously and since we will submit the zone
			 * reset synchronously, wait until previously submitted
			 * write requests have completed before issuing a
			 * zone reset.
			 */
			io_u_quiesce(td);
			zb->reset_zone = 0;
			if (zbd_reset_zone(td, f, zb) < 0)
				goto eof;

			if (zb->capacity < min_bs) {
				log_err("zone capacity %llu smaller than minimum block size %d\n",
					(unsigned long long)zb->capacity,
					min_bs);
				goto eof;
			}
		}
		/* Make writes occur at the write pointer */
		assert(!zbd_zone_full(f, zb, min_bs));
		io_u->offset = zb->wp;
		if (!is_valid_offset(f, io_u->offset)) {
			dprint(FD_ZBD, "Dropped request with offset %llu\n",
			       io_u->offset);
			goto eof;
		}
		/*
		 * Make sure that the buflen is a multiple of the minimal
		 * block size. Give up if shrinking would make the request too
		 * small.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      zbd_zone_capacity_end(zb) - io_u->offset);
		new_len = new_len / min_bs * min_bs;
		if (new_len == io_u->buflen)
			goto accept;
		if (new_len >= min_bs) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u into %llu\n",
			       orig_len, io_u->buflen);
			goto accept;
		}
		log_err("Zone remainder %lld smaller than minimum block size %d\n",
			(zbd_zone_capacity_end(zb) - io_u->offset),
			min_bs);
		goto eof;
	case DDIR_SYNC_FILE_RANGE:

accept:
	assert(zb->cond != ZBD_ZONE_COND_OFFLINE);
	assert(!io_u->zbd_queue_io);
	assert(!io_u->zbd_put_io);
	io_u->zbd_queue_io = zbd_queue_io;
	io_u->zbd_put_io = zbd_put_io;
	return io_u_accept;

eof:
	if (zb)
		zone_unlock(zb);
	return io_u_eof;
}
/* Return a string with ZBD statistics */
char *zbd_write_status(const struct thread_stat *ts)
{
	char *res;

	if (asprintf(&res, "; %llu zone resets", (unsigned long long) ts->nr_zone_resets) < 0)
		return NULL;
	return res;
}