 * Copyright (C) 2018 Western Digital Corporation or its affiliates.
 * This file is released under the GPL.
#include "compiler/compiler.h"
#include "oslib/asprintf.h"
static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
	return (uint64_t)(offset - f->file_offset) < f->io_size;
static inline unsigned int zbd_zone_idx(const struct fio_file *f,
					struct fio_zone_info *zone)
	return zone - f->zbd_info->zone_info;
/**
 * zbd_offset_to_zone_idx - convert an offset into a zone number
 * @offset: offset in bytes. If this offset is in the first zone_size bytes
 * past the disk size, then the index of the sentinel zone is returned.
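 *
 * Example: with a 256 MiB zone size (zone_size_log2 == 28), an offset of
 * 1 GiB maps to zone index 4, and any offset at or past the device end is
 * clamped to the sentinel index nr_zones.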
 */
static unsigned int zbd_offset_to_zone_idx(const struct fio_file *f,
					   uint64_t offset)
	if (f->zbd_info->zone_size_log2 > 0)
		zone_idx = offset >> f->zbd_info->zone_size_log2;
	else
		zone_idx = offset / f->zbd_info->zone_size;

	return min(zone_idx, f->zbd_info->nr_zones);
/**
 * zbd_zone_end - Return zone end location
 * @z: zone info pointer.
 */
static inline uint64_t zbd_zone_end(const struct fio_zone_info *z)
/**
 * zbd_zone_capacity_end - Return zone capacity limit end location
 * @z: zone info pointer.
 */
static inline uint64_t zbd_zone_capacity_end(const struct fio_zone_info *z)
	return z->start + z->capacity;
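
/*
 * Note that a zone's capacity may be smaller than its size: writes are only
 * valid up to z->start + z->capacity, even though the zone itself extends
 * to (z + 1)->start.
 */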
/**
 * zbd_zone_remainder - Return the number of bytes that are still available for
 *                      writing before the zone gets full
 * @z: zone info pointer.
 */
static inline uint64_t zbd_zone_remainder(struct fio_zone_info *z)
	if (z->wp >= zbd_zone_capacity_end(z))
		return 0;

	return zbd_zone_capacity_end(z) - z->wp;
/**
 * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
 * @z: zone info pointer.
 * @required: minimum number of bytes that must remain in a zone.
 *
 * The caller must hold z->mutex.
 */
static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
			  uint64_t required)
	assert((required & 511) == 0);

	return z->has_wp && required > zbd_zone_remainder(z);
static void zone_lock(struct thread_data *td, const struct fio_file *f,
		      struct fio_zone_info *z)
	unsigned int const nz = zbd_zone_idx(f, z);
	/* A thread should never lock zones outside its working area. */
	assert(f->min_zone <= nz && nz < f->max_zone);
	/*
	 * Lock the io_u target zone. The zone will be unlocked if io_u offset
	 * is changed or when io_u completes and zbd_put_io() is executed.
	 * To avoid multiple jobs doing asynchronous I/Os from deadlocking each
	 * other waiting for zone locks when building an io_u batch, first
	 * only trylock the zone. If the zone is already locked by another job,
	 * process the currently queued I/Os so that I/O progress is made and
	 * zones unlock.
	 */
	if (pthread_mutex_trylock(&z->mutex) != 0) {
		if (!td_ioengine_flagged(td, FIO_SYNCIO))
			io_u_quiesce(td);
		pthread_mutex_lock(&z->mutex);
	}
static inline void zone_unlock(struct fio_zone_info *z)
	pthread_mutex_unlock(&z->mutex);
static inline struct fio_zone_info *zbd_get_zone(const struct fio_file *f,
						 unsigned int zone_idx)
	return &f->zbd_info->zone_info[zone_idx];
static inline struct fio_zone_info *
zbd_offset_to_zone(const struct fio_file *f, uint64_t offset)
	return zbd_get_zone(f, zbd_offset_to_zone_idx(f, offset));
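
/*
 * Valid-data-bytes accounting is only needed when the zone_reset_threshold
 * option (td->o.zrt) is set for a job that issues writes.
 */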
static bool accounting_vdb(struct thread_data *td, const struct fio_file *f)
	return td->o.zrt.u.f && td_write(td);
/**
 * zbd_get_zoned_model - Get a device zoned model
 * @td: FIO thread data
 * @f: FIO file for which to get model information
 */
static int zbd_get_zoned_model(struct thread_data *td, struct fio_file *f,
			       enum zbd_zoned_model *model)
	if (f->filetype == FIO_TYPE_PIPE) {
		log_err("zonemode=zbd does not support pipes\n");
		return -EINVAL;
	}

	/* If regular file, always emulate zones inside the file. */
	if (f->filetype == FIO_TYPE_FILE) {
		*model = ZBD_NONE;
		return 0;
	}

	if (td->io_ops && td->io_ops->get_zoned_model)
		ret = td->io_ops->get_zoned_model(td, f, model);
	else
		ret = blkzoned_get_zoned_model(td, f, model);
	if (ret < 0) {
		td_verror(td, errno, "get zoned model failed");
		log_err("%s: get zoned model failed (%d).\n",
			f->file_name, errno);
	}
/**
 * zbd_report_zones - Get zone information
 * @td: FIO thread data.
 * @f: FIO file for which to get zone information
 * @offset: offset from which to report zones
 * @zones: Array of struct zbd_zone
 * @nr_zones: Size of @zones array
 *
 * Get zone information into @zones starting from the zone at offset @offset
 * for the device specified by @f.
 *
 * Returns the number of zones reported upon success and a negative error code
 * upon failure. If the zone report is empty, always assume an error (device
 * problem) and return -EIO.
 */
static int zbd_report_zones(struct thread_data *td, struct fio_file *f,
			    uint64_t offset, struct zbd_zone *zones,
			    unsigned int nr_zones)
	if (td->io_ops && td->io_ops->report_zones)
		ret = td->io_ops->report_zones(td, f, offset, zones, nr_zones);
	else
		ret = blkzoned_report_zones(td, f, offset, zones, nr_zones);
	if (ret < 0) {
		td_verror(td, errno, "report zones failed");
		log_err("%s: report zones from sector %"PRIu64" failed (nr_zones=%d; errno=%d).\n",
			f->file_name, offset >> 9, nr_zones, errno);
	} else if (ret == 0) {
		td_verror(td, errno, "Empty zone report");
		log_err("%s: report zones from sector %"PRIu64" is empty.\n",
			f->file_name, offset >> 9);
		ret = -EIO;
	}
/**
 * zbd_reset_wp - reset the write pointer of a range of zones
 * @td: FIO thread data.
 * @f: FIO file for which to reset zones
 * @offset: Starting offset of the first zone to reset
 * @length: Length of the range of zones to reset
 *
 * Reset the write pointer of all zones in the range @offset...@offset+@length.
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_wp(struct thread_data *td, struct fio_file *f,
			uint64_t offset, uint64_t length)
	if (td->io_ops && td->io_ops->reset_wp)
		ret = td->io_ops->reset_wp(td, f, offset, length);
	else
		ret = blkzoned_reset_wp(td, f, offset, length);
	if (ret < 0) {
		td_verror(td, errno, "resetting wp failed");
		log_err("%s: resetting wp for %"PRIu64" sectors at sector %"PRIu64" failed (%d).\n",
			f->file_name, length >> 9, offset >> 9, errno);
	}
/**
 * __zbd_reset_zone - reset the write pointer of a single zone
 * @td: FIO thread data.
 * @f: FIO file associated with the disk for which to reset a write pointer.
 * @z: Zone to reset.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * The caller must hold z->mutex.
 */
static int __zbd_reset_zone(struct thread_data *td, struct fio_file *f,
			    struct fio_zone_info *z)
	uint64_t offset = z->start;
	uint64_t length = (z+1)->start - offset;
	uint64_t data_in_zone = z->wp - z->start;

	assert(is_valid_offset(f, offset + length - 1));

	dprint(FD_ZBD, "%s: resetting wp of zone %u.\n",
	       f->file_name, zbd_zone_idx(f, z));
	switch (f->zbd_info->model) {
	case ZBD_HOST_AWARE:
	case ZBD_HOST_MANAGED:
		ret = zbd_reset_wp(td, f, offset, length);

	if (accounting_vdb(td, f)) {
		pthread_mutex_lock(&f->zbd_info->mutex);
		f->zbd_info->wp_valid_data_bytes -= data_in_zone;
		pthread_mutex_unlock(&f->zbd_info->mutex);
	}

	td->ts.nr_zone_resets++;
/**
 * zbd_write_zone_put - Remove a zone from the write target zones array.
 * @td: FIO thread data.
 * @f: FIO file that holds the write zones array.
 * @z: Zone to remove.
 *
 * The caller must hold f->zbd_info->mutex.
 */
static void zbd_write_zone_put(struct thread_data *td, const struct fio_file *f,
			       struct fio_zone_info *z)
	for (zi = 0; zi < f->zbd_info->num_write_zones; zi++) {
		if (zbd_get_zone(f, f->zbd_info->write_zones[zi]) == z)
			break;
	}
	if (zi == f->zbd_info->num_write_zones)
		return;
	dprint(FD_ZBD, "%s: removing zone %u from write zone array\n",
	       f->file_name, zbd_zone_idx(f, z));
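
	/*
	 * Close the gap by shifting the array entries after slot zi one
	 * position to the left, e.g. removing slot 1 from {3, 7, 9}
	 * leaves {3, 9}.
	 */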
	memmove(f->zbd_info->write_zones + zi,
		f->zbd_info->write_zones + zi + 1,
		(ZBD_MAX_WRITE_ZONES - (zi + 1)) *
		sizeof(f->zbd_info->write_zones[0]));

	f->zbd_info->num_write_zones--;
	td->num_write_zones--;
/**
 * zbd_reset_zone - reset the write pointer of a single zone and remove the
 *                  zone from the array of write zones.
 * @td: FIO thread data.
 * @f: FIO file associated with the disk for which to reset a write pointer.
 * @z: Zone to reset.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * The caller must hold z->mutex.
 */
static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
			  struct fio_zone_info *z)
	ret = __zbd_reset_zone(td, f, z);
	if (ret)
		return ret;

	pthread_mutex_lock(&f->zbd_info->mutex);
	zbd_write_zone_put(td, f, z);
	pthread_mutex_unlock(&f->zbd_info->mutex);
/**
 * zbd_finish_zone - finish the specified zone
 * @td: FIO thread data.
 * @f: FIO file for which to finish a zone
 * @z: Zone to finish.
 *
 * Finish the zone @z, i.e. transition it to the full condition and move its
 * write pointer to the end of the zone.
 */
static int zbd_finish_zone(struct thread_data *td, struct fio_file *f,
			   struct fio_zone_info *z)
	uint64_t offset = z->start;
	uint64_t length = f->zbd_info->zone_size;

	switch (f->zbd_info->model) {
	case ZBD_HOST_AWARE:
	case ZBD_HOST_MANAGED:
		if (td->io_ops && td->io_ops->finish_zone)
			ret = td->io_ops->finish_zone(td, f, offset, length);
		else
			ret = blkzoned_finish_zone(td, f, offset, length);

	if (ret < 0) {
		td_verror(td, errno, "finish zone failed");
		log_err("%s: finish zone at sector %"PRIu64" failed (%d).\n",
			f->file_name, offset >> 9, errno);
	}

	z->wp = (z+1)->start;
/**
 * zbd_reset_zones - Reset a range of zones.
 * @td: fio thread data.
 * @f: fio file for which to reset zones
 * @zb: first zone to reset.
 * @ze: first zone not to reset.
 *
 * Returns 0 upon success and 1 upon failure.
 */
static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
			   struct fio_zone_info *const zb,
			   struct fio_zone_info *const ze)
	struct fio_zone_info *z;
	const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];

	if (fio_unlikely(0 == min_bs))
		return 1;

	dprint(FD_ZBD, "%s: examining zones %u .. %u\n",
	       f->file_name, zbd_zone_idx(f, zb), zbd_zone_idx(f, ze));
	for (z = zb; z < ze; z++) {
		if (z->wp != z->start) {
			dprint(FD_ZBD, "%s: resetting zone %u\n",
			       f->file_name, zbd_zone_idx(f, z));
			if (zbd_reset_zone(td, f, z) < 0)
/**
 * zbd_move_zone_wp - move the write pointer of a zone by writing the data in
 *                    the specified buffer
 * @td: FIO thread data.
 * @f: FIO file for which to move the write pointer
 * @z: Target zone to move the write pointer
 * @length: Length of the move
 * @buf: Buffer which holds the data to write
 *
 * Move the write pointer at the specified offset by writing the data
 * in the specified buffer.
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_move_zone_wp(struct thread_data *td, struct fio_file *f,
			    struct zbd_zone *z, uint64_t length,
			    const char *buf)
	switch (f->zbd_info->model) {
	case ZBD_HOST_AWARE:
	case ZBD_HOST_MANAGED:
		if (td->io_ops && td->io_ops->move_zone_wp)
			ret = td->io_ops->move_zone_wp(td, f, z, length, buf);
		else
			ret = blkzoned_move_zone_wp(td, f, z, length, buf);
	if (ret < 0) {
		td_verror(td, errno, "move wp failed");
		log_err("%s: moving wp for %"PRIu64" sectors at sector %"PRIu64" failed (%d).\n",
			f->file_name, length >> 9, z->wp >> 9, errno);
	}
/**
 * zbd_get_max_open_zones - Get the maximum number of open zones
 * @td: FIO thread data
 * @f: FIO file for which to get max open zones
 * @max_open_zones: Upon success, result will be stored here.
 *
 * A @max_open_zones value set to zero means no limit.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_get_max_open_zones(struct thread_data *td, struct fio_file *f,
				  unsigned int *max_open_zones)
	if (td->io_ops && td->io_ops->get_max_open_zones)
		ret = td->io_ops->get_max_open_zones(td, f, max_open_zones);
	else
		ret = blkzoned_get_max_open_zones(td, f, max_open_zones);
	if (ret < 0) {
		td_verror(td, errno, "get max open zones failed");
		log_err("%s: get max open zones failed (%d).\n",
			f->file_name, errno);
	}
/**
 * zbd_get_max_active_zones - Get the maximum number of active zones
 * @td: FIO thread data
 * @f: FIO file for which to get max active zones
 *
 * Returns the max_active_zones limit value of the target file if it is
 * available. Otherwise returns zero, which means no limit.
 */
static unsigned int zbd_get_max_active_zones(struct thread_data *td,
					     struct fio_file *f)
	unsigned int max_active_zones;

	if (td->io_ops && td->io_ops->get_max_active_zones)
		ret = td->io_ops->get_max_active_zones(td, f,
						       &max_active_zones);
	else
		ret = blkzoned_get_max_active_zones(td, f, &max_active_zones);
	if (ret < 0) {
		dprint(FD_ZBD, "%s: max_active_zones is not available\n",
		       f->file_name);
		return 0;
	}

	return max_active_zones;
/**
 * __zbd_write_zone_get - Add a zone to the array of write zones.
 * @td: fio thread data.
 * @f: fio file that holds the write zones array.
 * @zone_idx: Index of the zone to add.
 *
 * Do the same operation as zbd_write_zone_get(), except add the zone at
 * @zone_idx to the write target zones array even when it does not have
 * enough remaining space for a one-block write.
 */
static bool __zbd_write_zone_get(struct thread_data *td,
				 const struct fio_file *f,
				 struct fio_zone_info *z)
	struct zoned_block_device_info *zbdi = f->zbd_info;
	uint32_t zone_idx = zbd_zone_idx(f, z);
	if (z->cond == ZBD_ZONE_COND_OFFLINE)

	/*
	 * Skip full zones with data verification enabled because resetting a
	 * zone causes data loss and hence causes verification to fail.
	 */
	if (td->o.verify != VERIFY_NONE && zbd_zone_remainder(z) == 0)

	/*
	 * zbdi->max_write_zones == 0 means that there is no limit on the
	 * maximum number of write target zones. In this case, do not track
	 * write target zones in the zbdi->write_zones array.
	 */
	if (!zbdi->max_write_zones)

	pthread_mutex_lock(&zbdi->mutex);

	/*
	 * If the zone is going to be completely filled by writes
	 * already in-flight, handle it as a full zone instead of an
	 * open zone.
	 */
	if (!zbd_zone_remainder(z))

	/* Zero means no limit */
	if (td->o.job_max_open_zones > 0 &&
	    td->num_write_zones >= td->o.job_max_open_zones)
	if (zbdi->num_write_zones >= zbdi->max_write_zones)

	dprint(FD_ZBD, "%s: adding zone %u to write zone array\n",
	       f->file_name, zone_idx);

	zbdi->write_zones[zbdi->num_write_zones++] = zone_idx;
	td->num_write_zones++;

	pthread_mutex_unlock(&zbdi->mutex);
/**
 * zbd_write_zone_get - Add a zone to the array of write zones.
 * @td: fio thread data.
 * @f: fio file that has the open zones to add.
 * @zone_idx: Index of the zone to add.
 *
 * Add a ZBD zone to the write target zones array, if it is not yet added.
 * Returns true if either the zone was already added or if the zone was
 * successfully added to the array without exceeding the maximum number of
 * write zones. Returns false if the zone was not already added and addition
 * of the zone would cause the zone limit to be exceeded.
 */
static bool zbd_write_zone_get(struct thread_data *td, const struct fio_file *f,
			       struct fio_zone_info *z)
	const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];

	/*
	 * Skip full zones with data verification enabled because resetting a
	 * zone causes data loss and hence causes verification to fail.
	 */
	if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))

	return __zbd_write_zone_get(td, f, z);
/* Verify whether direct I/O is used for all host-managed zoned block drives. */
static bool zbd_using_direct_io(void)
	if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
		continue;
	for_each_file(td, f, j) {
		if (f->zbd_info && f->filetype == FIO_TYPE_BLOCK &&
		    f->zbd_info->model == ZBD_HOST_MANAGED)
/* Whether or not the I/O range for f includes one or more sequential zones */
static bool zbd_is_seq_job(const struct fio_file *f)
	uint32_t zone_idx, zone_idx_b, zone_idx_e;

	zone_idx_b = zbd_offset_to_zone_idx(f, f->file_offset);
	zone_idx_e =
		zbd_offset_to_zone_idx(f, f->file_offset + f->io_size - 1);
	for (zone_idx = zone_idx_b; zone_idx <= zone_idx_e; zone_idx++)
		if (zbd_get_zone(f, zone_idx)->has_wp)
/*
 * Verify whether the file offset and size parameters are aligned with zone
 * boundaries. If the file offset is not aligned, align it up to the start of
 * the next zone and align the io_size parameter down so that the I/O range
 * ends on a zone boundary.
 */
static bool zbd_zone_align_file_sizes(struct thread_data *td,
				      struct fio_file *f)
	const struct fio_zone_info *z;
	uint64_t new_offset, new_end;

	if (f->file_offset >= f->real_file_size)
	if (!zbd_is_seq_job(f))

	if (!td->o.zone_size) {
		td->o.zone_size = f->zbd_info->zone_size;
		if (!td->o.zone_size) {
			log_err("%s: invalid 0 zone size\n",
				f->file_name);
	} else if (td->o.zone_size != f->zbd_info->zone_size) {
		log_err("%s: zonesize %llu does not match the device zone size %"PRIu64".\n",
			f->file_name, td->o.zone_size,
			f->zbd_info->zone_size);

	if (td->o.zone_skip % td->o.zone_size) {
		log_err("%s: zoneskip %llu is not a multiple of the device zone size %llu.\n",
			f->file_name, td->o.zone_skip,
			td->o.zone_size);
	if (td->o.td_ddir == TD_DDIR_READ) {
		z = zbd_offset_to_zone(f, f->file_offset + f->io_size);
		new_end = z->start;
		if (f->file_offset + f->io_size > new_end) {
			log_info("%s: rounded io_size from %"PRIu64" to %"PRIu64"\n",
				 f->file_name, f->io_size,
				 new_end - f->file_offset);
			f->io_size = new_end - f->file_offset;
	z = zbd_offset_to_zone(f, f->file_offset);
	if (f->file_offset != z->start) {
		new_offset = zbd_zone_end(z);
		if (new_offset >= f->file_offset + f->io_size) {
			log_info("%s: io_size must be at least one zone\n",
				 f->file_name);
		log_info("%s: rounded up offset from %"PRIu64" to %"PRIu64"\n",
			 f->file_name, f->file_offset,
			 new_offset);
		f->io_size -= (new_offset - f->file_offset);
		f->file_offset = new_offset;
	z = zbd_offset_to_zone(f, f->file_offset + f->io_size);
	new_end = z->start;
	if (f->file_offset + f->io_size != new_end) {
		if (new_end <= f->file_offset) {
			log_info("%s: io_size must be at least one zone\n",
				 f->file_name);
		log_info("%s: rounded down io_size from %"PRIu64" to %"PRIu64"\n",
			 f->file_name, f->io_size,
			 new_end - f->file_offset);
		f->io_size = new_end - f->file_offset;
/*
 * Verify whether offset and size parameters are aligned with zone boundaries.
 */
static bool zbd_verify_sizes(void)
	for_each_file(td, f, j) {
		if (!zbd_zone_align_file_sizes(td, f))
			return false;
	}
static bool zbd_verify_bs(void)
	if (td_trim(td) &&
	    (td->o.min_bs[DDIR_TRIM] != td->o.max_bs[DDIR_TRIM] ||
	     td->o.bssplit_nr[DDIR_TRIM])) {
		log_info("bsrange and bssplit are not allowed for trim with zonemode=zbd\n");
		return false;
	}
	for_each_file(td, f, j) {
		zone_size = f->zbd_info->zone_size;
		if (td_trim(td) && td->o.bs[DDIR_TRIM] != zone_size) {
			log_info("%s: trim block size %llu is not the zone size %"PRIu64"\n",
				 f->file_name, td->o.bs[DDIR_TRIM],
				 zone_size);
static int ilog2(uint64_t i)
/*
 * Initialize f->zbd_info for devices that are not zoned block devices. This
 * allows executing a ZBD workload against a non-ZBD device.
 */
static int init_zone_info(struct thread_data *td, struct fio_file *f)
	struct fio_zone_info *p;
	uint64_t zone_size = td->o.zone_size;
	uint64_t zone_capacity = td->o.zone_capacity;
	struct zoned_block_device_info *zbd_info = NULL;

	if (zone_size == 0) {
		log_err("%s: Specifying the zone size is mandatory for regular file/block device with --zonemode=zbd\n\n",
			f->file_name);
		return 1;
	}

	if (zone_size < 512) {
		log_err("%s: zone size must be at least 512 bytes for --zonemode=zbd\n\n",
			f->file_name);
		return 1;
	}

	if (zone_capacity == 0)
		zone_capacity = zone_size;

	if (zone_capacity > zone_size) {
		log_err("%s: job parameter zonecapacity %llu is larger than zone size %llu\n",
			f->file_name, td->o.zone_capacity, td->o.zone_size);
		return 1;
	}

	if (f->real_file_size < zone_size) {
		log_err("%s: file/device size %"PRIu64" is smaller than zone size %"PRIu64"\n",
			f->file_name, f->real_file_size, zone_size);
		return 1;
	}
	nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));

	mutex_init_pshared(&zbd_info->mutex);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (i = 0; i < nr_zones; i++, p++) {
		mutex_init_pshared_with_type(&p->mutex,
					     PTHREAD_MUTEX_RECURSIVE);
		p->start = i * zone_size;
		p->wp = p->start;
		p->type = ZBD_ZONE_TYPE_SWR;
		p->cond = ZBD_ZONE_COND_EMPTY;
		p->capacity = zone_capacity;

	p->start = nr_zones * zone_size;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) : 0;
	f->zbd_info->nr_zones = nr_zones;
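
/*
 * A hypothetical job file exercising this emulation path on a regular file
 * (the option values below are illustrative only):
 *
 *   [emulated-zbd]
 *   filename=/tmp/zbd-testfile
 *   zonemode=zbd
 *   zone_size=64M
 *   zone_capacity=48M
 *   direct=1
 *   rw=write
 *   size=1G
 */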
/*
 * Maximum number of zones to report in one operation.
 */
#define ZBD_REPORT_MAX_ZONES	8192U
/*
 * Parse the device zone report and store it in f->zbd_info. Must be called
 * only for devices that are zoned, namely those with a model != ZBD_NONE.
 */
static int parse_zone_info(struct thread_data *td, struct fio_file *f)
	struct zbd_zone *zones, *z;
	struct fio_zone_info *p;
	uint64_t zone_size, offset, capacity;
	bool same_zone_cap = true;
	struct zoned_block_device_info *zbd_info = NULL;
	int i, j, ret = -ENOMEM;

	zones = calloc(ZBD_REPORT_MAX_ZONES, sizeof(struct zbd_zone));

	nrz = zbd_report_zones(td, f, 0, zones, ZBD_REPORT_MAX_ZONES);
	if (nrz < 0) {
		ret = nrz;
		log_info("fio: report zones (offset 0) failed for %s (%d).\n",
			 f->file_name, -ret);

	zone_size = zones[0].len;
	capacity = zones[0].capacity;
	nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
	if (td->o.zone_size == 0) {
		td->o.zone_size = zone_size;
	} else if (td->o.zone_size != zone_size) {
		log_err("fio: %s job parameter zonesize %llu does not match disk zone size %"PRIu64".\n",
			f->file_name, td->o.zone_size, zone_size);
	}

	dprint(FD_ZBD, "Device %s has %d zones of size %"PRIu64" KB\n",
	       f->file_name, nr_zones, zone_size / 1024);
	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));

	mutex_init_pshared(&zbd_info->mutex);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (offset = 0, j = 0; j < nr_zones;) {
		z = &zones[0];
		for (i = 0; i < nrz; i++, j++, z++, p++) {
			mutex_init_pshared_with_type(&p->mutex,
						     PTHREAD_MUTEX_RECURSIVE);
			p->start = z->start;
			p->capacity = z->capacity;
			if (capacity != z->capacity)
				same_zone_cap = false;

			switch (z->cond) {
			case ZBD_ZONE_COND_NOT_WP:
			case ZBD_ZONE_COND_FULL:
				p->wp = p->start + p->capacity;
				break;
			default:
				assert(z->start <= z->wp);
				assert(z->wp <= z->start + zone_size);
				p->wp = z->wp;
				break;
			}

			switch (z->type) {
			case ZBD_ZONE_TYPE_SWR:
			if (j > 0 && p->start != p[-1].start + zone_size) {
				log_info("%s: invalid zone data [%d:%d]: %"PRIu64" + %"PRIu64" != %"PRIu64"\n",
					 f->file_name, j, i,
					 p[-1].start, zone_size, p->start);
			}

		offset = z->start + z->len;

		nrz = zbd_report_zones(td, f, offset, zones,
				       min((uint32_t)(nr_zones - j),
					   ZBD_REPORT_MAX_ZONES));
		if (nrz < 0) {
			ret = nrz;
			log_info("fio: report zones (offset %"PRIu64") failed for %s (%d).\n",
				 offset, f->file_name, -ret);
	zbd_info->zone_info[nr_zones].start = offset;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) : 0;
	f->zbd_info->nr_zones = nr_zones;
	f->zbd_info->max_active_zones = zbd_get_max_active_zones(td, f);

	if (same_zone_cap)
		dprint(FD_ZBD, "Zone capacity = %"PRIu64" KB\n",
		       capacity / 1024);
static int zbd_set_max_write_zones(struct thread_data *td, struct fio_file *f)
	struct zoned_block_device_info *zbd = f->zbd_info;
	unsigned int max_open_zones;

	if (zbd->model != ZBD_HOST_MANAGED || td->o.ignore_zone_limits) {
		/* Only host-managed devices have a max open limit */
		zbd->max_write_zones = td->o.max_open_zones;

	/* If host-managed, get the max open limit */
	ret = zbd_get_max_open_zones(td, f, &max_open_zones);

	if (!max_open_zones) {
		/* No device limit */
		zbd->max_write_zones = td->o.max_open_zones;
	} else if (!td->o.max_open_zones) {
		/* No user limit. Set limit to device limit */
		zbd->max_write_zones = max_open_zones;
	} else if (td->o.max_open_zones <= max_open_zones) {
		/* Both user limit and dev limit. User limit not too large */
		zbd->max_write_zones = td->o.max_open_zones;
	} else {
		/* Both user limit and dev limit. User limit too large */
		td_verror(td, EINVAL,
			  "Specified --max_open_zones is too large");
		log_err("Specified --max_open_zones (%d) is larger than max (%u)\n",
			td->o.max_open_zones, max_open_zones);
	}

	/* Ensure that the limit is not larger than FIO's internal limit */
	if (zbd->max_write_zones > ZBD_MAX_WRITE_ZONES) {
		td_verror(td, EINVAL, "'max_open_zones' value is too large");
		log_err("'max_open_zones' value is larger than %u\n",
			ZBD_MAX_WRITE_ZONES);
	}

	dprint(FD_ZBD, "%s: using max write zones limit: %"PRIu32"\n",
	       f->file_name, zbd->max_write_zones);
/*
 * Allocate zone information and store it into f->zbd_info if zonemode=zbd.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
	enum zbd_zoned_model zbd_model;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);

	ret = zbd_get_zoned_model(td, f, &zbd_model);
	if (ret)
		return ret;

	switch (zbd_model) {
	case ZBD_HOST_AWARE:
	case ZBD_HOST_MANAGED:
		ret = parse_zone_info(td, f);
		break;
	case ZBD_NONE:
		ret = init_zone_info(td, f);
		break;
	default:
		td_verror(td, EINVAL, "Unsupported zoned model");
		log_err("Unsupported zoned model\n");
		return -EINVAL;
	}

	assert(f->zbd_info);
	f->zbd_info->model = zbd_model;

	ret = zbd_set_max_write_zones(td, f);
	if (ret)
		zbd_free_zone_info(f);
void zbd_free_zone_info(struct fio_file *f)
	assert(f->zbd_info);

	pthread_mutex_lock(&f->zbd_info->mutex);
	refcount = --f->zbd_info->refcount;
	pthread_mutex_unlock(&f->zbd_info->mutex);

	assert((int32_t)refcount >= 0);
/*
 * Initialize f->zbd_info.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * Note: this function can only work correctly if it is called before the first
 * fio fork() call.
 */
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
	struct fio_file *f2;

	for_each_file(td2, f2, j) {
		if (td2 == td && f2 == file)
			continue;
		if (!f2->zbd_info ||
		    strcmp(f2->file_name, file->file_name) != 0)
			continue;
		file->zbd_info = f2->zbd_info;
		file->zbd_info->refcount++;

	ret = zbd_create_zone_info(td, file);
	if (ret < 0)
		td_verror(td, -ret, "zbd_create_zone_info() failed");
int zbd_init_files(struct thread_data *td)
	for_each_file(td, f, i) {
		if (zbd_init_zone_info(td, f))
			return 1;
	}
void zbd_recalc_options_with_zone_granularity(struct thread_data *td)
	for_each_file(td, f, i) {
		struct zoned_block_device_info *zbd = f->zbd_info;

		/* zonemode=strided doesn't get per-file zone size. */
		zone_size = zbd ? zbd->zone_size : td->o.zone_size;

		if (td->o.size_nz > 0)
			td->o.size = td->o.size_nz * zone_size;
		if (td->o.io_size_nz > 0)
			td->o.io_size = td->o.io_size_nz * zone_size;
		if (td->o.start_offset_nz > 0)
			td->o.start_offset = td->o.start_offset_nz * zone_size;
		if (td->o.offset_increment_nz > 0)
			td->o.offset_increment =
				td->o.offset_increment_nz * zone_size;
		if (td->o.zone_skip_nz > 0)
			td->o.zone_skip = td->o.zone_skip_nz * zone_size;
static uint64_t zbd_verify_and_set_vdb(struct thread_data *td,
				       const struct fio_file *f)
	struct fio_zone_info *zb, *ze, *z;
	uint64_t wp_vdb = 0;
	struct zoned_block_device_info *zbdi = f->zbd_info;

	assert(td->runstate < TD_RUNNING);

	if (!accounting_vdb(td, f))
		return 0;

	/*
	 * Ensure that the I/O range includes one or more sequential zones so
	 * that f->min_zone and f->max_zone have different values.
	 */
	if (!zbd_is_seq_job(f))
		return 0;

	if (zbdi->write_min_zone != zbdi->write_max_zone) {
		if (zbdi->write_min_zone != f->min_zone ||
		    zbdi->write_max_zone != f->max_zone) {
			td_verror(td, EINVAL,
				  "multi-jobs with different write ranges are "
				  "not supported with zone_reset_threshold");
			log_err("multi-jobs with different write ranges are "
				"not supported with zone_reset_threshold\n");
		}
		return 0;
	}

	zbdi->write_min_zone = f->min_zone;
	zbdi->write_max_zone = f->max_zone;

	zb = zbd_get_zone(f, f->min_zone);
	ze = zbd_get_zone(f, f->max_zone);
	for (z = zb; z < ze; z++)
		if (z->has_wp)
			wp_vdb += z->wp - z->start;

	zbdi->wp_valid_data_bytes = wp_vdb;
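
	/*
	 * Example: with io_size = 1 GiB and zone_reset_threshold=0.5, zone
	 * resets become eligible once the accumulated write pointer data in
	 * the range reaches 512 MiB (see the threshold check in
	 * zbd_adjust_block()).
	 */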
int zbd_setup_files(struct thread_data *td)
	if (!zbd_using_direct_io()) {
		log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
		return 1;
	}

	if (!zbd_verify_sizes())
		return 1;

	if (!zbd_verify_bs())
		return 1;

	if (td->o.recover_zbd_write_error && td_write(td)) {
		if (!td->o.continue_on_error) {
			log_err("recover_zbd_write_error works only when continue_on_error is set\n");
			return 1;
		}
		if (td->o.verify != VERIFY_NONE &&
		    !td_ioengine_flagged(td, FIO_SYNCIO)) {
			log_err("recover_zbd_write_error for async IO engines does not support verify\n");
			return 1;
		}
	}

	if (td->o.experimental_verify) {
		log_err("zonemode=zbd does not support experimental verify\n");
		return 1;
	}
	for_each_file(td, f, i) {
		struct zoned_block_device_info *zbd = f->zbd_info;
		struct fio_zone_info *z;

		f->min_zone = zbd_offset_to_zone_idx(f, f->file_offset);
		f->max_zone =
			zbd_offset_to_zone_idx(f, f->file_offset + f->io_size);

		vdb = zbd_verify_and_set_vdb(td, f);

		dprint(FD_ZBD, "%s(%s): valid data bytes = %" PRIu64 "\n",
		       __func__, f->file_name, vdb);

		/*
		 * When all zones in the I/O range are conventional, io_size
		 * can be smaller than zone size, making min_zone the same
		 * as max_zone. This is why the assert below needs to be made
		 * conditional.
		 */
		if (zbd_is_seq_job(f))
			assert(f->min_zone < f->max_zone);
		if (td->o.max_open_zones > 0 &&
		    zbd->max_write_zones != td->o.max_open_zones) {
			log_err("Different 'max_open_zones' values\n");
			return 1;
		}

		/*
		 * If this job does not do write operations, skip open zone
		 * checks.
		 */
		if (!td_write(td)) {
			if (td->o.job_max_open_zones)
				log_info("'job_max_open_zones' is valid only for write jobs\n");
			continue;
		}

		/*
		 * The per job max open zones limit cannot be used without a
		 * global max open zones limit. (As the tracking of open zones
		 * is disabled when there is no global max open zones limit.)
		 */
		if (td->o.job_max_open_zones && !zbd->max_write_zones) {
			log_err("'job_max_open_zones' cannot be used without a global open zones limit\n");
			return 1;
		}

		/*
		 * zbd->max_write_zones is the global limit shared for all jobs
		 * that target the same zoned block device. Force sync the per
		 * thread global limit with the actual global limit. (The real
		 * per thread/job limit is stored in td->o.job_max_open_zones).
		 */
		td->o.max_open_zones = zbd->max_write_zones;
		for (zi = f->min_zone; zi < f->max_zone; zi++) {
			z = &zbd->zone_info[zi];
			if (z->cond != ZBD_ZONE_COND_IMP_OPEN &&
			    z->cond != ZBD_ZONE_COND_EXP_OPEN &&
			    z->cond != ZBD_ZONE_COND_CLOSED)
				continue;
			if (!zbd->max_active_zones &&
			    z->cond == ZBD_ZONE_COND_CLOSED)
				continue;
			if (__zbd_write_zone_get(td, f, z))
				continue;
			/*
			 * If the number of open zones exceeds specified limits,
			 * error out.
			 */
			log_err("Number of open zones exceeds max_open_zones limit\n");
/*
 * Reset zbd_info.write_cnt, the counter that counts down towards the next
 * zone reset.
 */
static void _zbd_reset_write_cnt(const struct thread_data *td,
				 const struct fio_file *f)
	assert(0 <= td->o.zrf.u.f && td->o.zrf.u.f <= 1);

	f->zbd_info->write_cnt = td->o.zrf.u.f ?
		min(1.0 / td->o.zrf.u.f, 0.0 + UINT_MAX) : UINT_MAX;
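
/*
 * Example: zone_reset_frequency=0.25 initializes write_cnt to 4, so
 * zbd_dec_and_reset_write_cnt() below returns true once per four
 * decrements, at which point the counter is re-armed.
 */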
static void zbd_reset_write_cnt(const struct thread_data *td,
				const struct fio_file *f)
	pthread_mutex_lock(&f->zbd_info->mutex);
	_zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);
static bool zbd_dec_and_reset_write_cnt(const struct thread_data *td,
					const struct fio_file *f)
	uint32_t write_cnt = 0;

	pthread_mutex_lock(&f->zbd_info->mutex);
	assert(f->zbd_info->write_cnt);
	if (f->zbd_info->write_cnt)
		write_cnt = --f->zbd_info->write_cnt;
	if (write_cnt == 0)
		_zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);

	return write_cnt == 0;
void zbd_file_reset(struct thread_data *td, struct fio_file *f)
	struct fio_zone_info *zb, *ze;
	bool verify_data_left = false;

	if (!f->zbd_info || !td_write(td))

	zb = zbd_get_zone(f, f->min_zone);
	ze = zbd_get_zone(f, f->max_zone);

	/*
	 * If data verification is enabled, reset the affected zones before
	 * writing any data, to avoid having to issue a zone reset while data
	 * is being written (which would cause data loss).
	 */
	if (td->o.verify != VERIFY_NONE) {
		verify_data_left = td->runstate == TD_VERIFYING ||
				   td->io_hist_len || td->verify_batch;
		if (!verify_data_left)
			zbd_reset_zones(td, f, zb, ze);
	}

	zbd_reset_write_cnt(td, f);
/* Return a random slot index into the array of write target zones. */
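/*
 * Example: with num_write_zones == 8 and an io_u offset exactly halfway
 * into the file's I/O range, the expression below evaluates to slot 4
 * (integer arithmetic truncates toward zero).
 */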
static uint32_t pick_random_zone_idx(const struct fio_file *f,
				     const struct io_u *io_u)
	return (io_u->offset - f->file_offset) *
	       f->zbd_info->num_write_zones / f->io_size;
/*
 * Randomly choose a zone in the array of write zones and in the range for the
 * file f. If such a zone is found, return its index in f->zbd_info->zone_info[]
 * using @zone_idx, and return true. Otherwise, return false.
 *
 * Caller must hold f->zbd_info->mutex.
 */
static bool zbd_pick_write_zone(const struct fio_file *f,
				const struct io_u *io_u, uint32_t *zone_idx)
	struct zoned_block_device_info *zbdi = f->zbd_info;
	uint32_t write_zone_idx;
	uint32_t cur_zone_idx;
	int i;

	/*
	 * The array of write target zones is per-device, shared across all
	 * jobs. Start with a quasi-random candidate zone and ignore zones
	 * which do not belong to the offset/size range of the current job.
	 */
	write_zone_idx = pick_random_zone_idx(f, io_u);
	assert(!write_zone_idx || write_zone_idx < zbdi->num_write_zones);

	for (i = 0; i < zbdi->num_write_zones; i++) {
		if (write_zone_idx >= zbdi->num_write_zones)
			write_zone_idx = 0;
		cur_zone_idx = zbdi->write_zones[write_zone_idx];
		if (f->min_zone <= cur_zone_idx && cur_zone_idx < f->max_zone) {
			*zone_idx = cur_zone_idx;
			return true;
		}
		write_zone_idx++;
	}

	return false;
static bool any_io_in_flight(void)
	if (td->io_u_in_flight)
		return true;

	return false;
/**
 * zbd_convert_to_write_zone - Convert the target zone of an io_u to a writable zone
 * @td: The fio thread data
 * @io_u: The I/O unit that targets the zone to convert
 * @zb: The zone selected at the beginning of the function call. The caller must
 *      hold the lock of this zone.
 *
 * Modify the offset of an I/O unit that does not refer to a writable zone so
 * that it targets a zone in the write target zones array. Add a zone to or
 * remove a zone from the array if necessary. The write target zone is
 * searched across sequential zones. This algorithm can only work correctly
 * if all write pointers are a multiple of the fio block size. The caller must
 * not hold f->zbd_info->mutex. Returns with z->mutex held upon success.
 */
static struct fio_zone_info *zbd_convert_to_write_zone(struct thread_data *td,
						       struct io_u *io_u,
						       struct fio_zone_info *zb)
	const uint64_t min_bs = td->o.min_bs[io_u->ddir];
	struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbdi = f->zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx, new_zone_idx;
	bool wait_zone_write;
	bool in_flight;
	bool should_retry = true;
	bool need_zone_finish;

	assert(is_valid_offset(f, io_u->offset));

	if (zbd_zone_remainder(zb) > 0 && zbd_zone_remainder(zb) < min_bs) {
		pthread_mutex_lock(&f->zbd_info->mutex);
		zbd_write_zone_put(td, f, zb);
		pthread_mutex_unlock(&f->zbd_info->mutex);
		dprint(FD_ZBD, "%s: finish zone %d\n",
		       f->file_name, zbd_zone_idx(f, zb));

		zbd_finish_zone(td, f, zb);

		if (zbd_zone_idx(f, zb) + 1 >= f->max_zone && !td_random(td))

		/* Find the next write pointer zone */
		do {
			zb++;
			if (zbd_zone_idx(f, zb) >= f->max_zone)
				zb = zbd_get_zone(f, f->min_zone);
		} while (!zb->has_wp);
		zone_lock(td, f, zb);

		if (zbd_write_zone_get(td, f, zb))

	if (zbdi->max_write_zones || td->o.job_max_open_zones) {
		/*
		 * This statement accesses zbdi->write_zones[] on purpose
		 * without locking.
		 */
		zone_idx = zbdi->write_zones[pick_random_zone_idx(f, io_u)];
	} else {
		zone_idx = zbd_offset_to_zone_idx(f, io_u->offset);
	}
	if (zone_idx < f->min_zone)
		zone_idx = f->min_zone;
	else if (zone_idx >= f->max_zone)
		zone_idx = f->max_zone - 1;

	dprint(FD_ZBD,
	       "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
	       __func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);
	/*
	 * Since z->mutex is the outer lock and zbdi->mutex the inner lock,
	 * it can happen that the state of the zone with index zone_idx has
	 * changed after 'z' has been assigned and before zbdi->mutex has
	 * been obtained. Hence the loop.
	 */
	for (;;) {
		z = zbd_get_zone(f, zone_idx);

		zone_lock(td, f, z);

		pthread_mutex_lock(&zbdi->mutex);

		if (z->cond != ZBD_ZONE_COND_OFFLINE &&
		    zbdi->max_write_zones == 0 &&
		    td->o.job_max_open_zones == 0)
			goto examine_zone;
		if (zbdi->num_write_zones == 0) {
			dprint(FD_ZBD, "%s(%s): no zone is write target\n",
			       __func__, f->file_name);
			goto choose_other_zone;
		}

		if (!zbd_pick_write_zone(f, io_u, &new_zone_idx)) {
			dprint(FD_ZBD, "%s(%s): no candidate zone\n",
			       __func__, f->file_name);
			pthread_mutex_unlock(&zbdi->mutex);

		if (new_zone_idx == zone_idx)
			break;
		zone_idx = new_zone_idx;

		pthread_mutex_unlock(&zbdi->mutex);
examine_zone:
	/* Both z->mutex and zbdi->mutex are held. */

	if (zbd_zone_remainder(z) >= min_bs) {
		pthread_mutex_unlock(&zbdi->mutex);
		goto out;
	}

	/*
	 * Check whether the number of write target zones has reached one of
	 * the limits.
	 */
	wait_zone_write =
		zbdi->num_write_zones == f->max_zone - f->min_zone ||
		(zbdi->max_write_zones &&
		 zbdi->num_write_zones == zbdi->max_write_zones) ||
		(td->o.job_max_open_zones &&
		 td->num_write_zones == td->o.job_max_open_zones);
	pthread_mutex_unlock(&zbdi->mutex);

	/* Only z->mutex is held. */

	/*
	 * When the number of write target zones reaches one of the limits,
	 * wait for writes to one of those zones to complete before trying
	 * a new zone.
	 */
	if (wait_zone_write) {
		dprint(FD_ZBD,
		       "%s(%s): quiesce to remove a zone from write target zones array\n",
		       __func__, f->file_name);
		io_u_quiesce(td);
	}
	/* Zone 'z' is full, so try to choose a new zone. */
	for (i = f->io_size / zbdi->zone_size; i > 0; i--) {
		if (!is_valid_offset(f, z->start)) {
			/* Wrap-around. */
			zone_idx = f->min_zone;
			z = zbd_get_zone(f, zone_idx);
		}
		assert(is_valid_offset(f, z->start));
		zone_lock(td, f, z);

		if (zbd_write_zone_get(td, f, z))
	/* Only z->mutex is held. */

	/* Check whether the write fits in any of the write target zones. */
	pthread_mutex_lock(&zbdi->mutex);
	need_zone_finish = true;
	for (i = 0; i < zbdi->num_write_zones; i++) {
		zone_idx = zbdi->write_zones[i];
		if (zone_idx < f->min_zone || zone_idx >= f->max_zone)
			continue;
		pthread_mutex_unlock(&zbdi->mutex);
		zone_unlock(z);

		z = zbd_get_zone(f, zone_idx);

		zone_lock(td, f, z);
		if (zbd_zone_remainder(z) >= min_bs) {
			need_zone_finish = false;
			goto out;
		}
		pthread_mutex_lock(&zbdi->mutex);
	/*
	 * When any I/O is in flight or when all in-flight I/Os have just
	 * completed, zones may have been removed from the write target array.
	 * In that case, retry the steps to choose a zone. Before retrying,
	 * call io_u_quiesce() to complete the in-flight writes.
	 */
	in_flight = any_io_in_flight();
	if (in_flight || should_retry) {
		dprint(FD_ZBD,
		       "%s(%s): wait zone write and retry write target zone selection\n",
		       __func__, f->file_name);
		should_retry = in_flight;
		pthread_mutex_unlock(&zbdi->mutex);
		io_u_quiesce(td);
		zone_lock(td, f, z);
	if (td_random(td) && td->o.verify == VERIFY_NONE && need_zone_finish)
		/*
		 * If all open zones have a remainder smaller than the block
		 * size for random write jobs, choose one of the write target
		 * zones and finish it. When verify is enabled, skip this zone
		 * finish operation to avoid corrupting verify data by
		 * overwriting the finished zone.
		 */
		if (zbd_pick_write_zone(f, io_u, &zone_idx)) {
			pthread_mutex_unlock(&zbdi->mutex);
			zone_unlock(z);
			z = zbd_get_zone(f, zone_idx);
			zone_lock(td, f, z);

			dprint(FD_ZBD, "%s(%s): All write target zones have remainder smaller than block size. Choose zone %d and finish.\n",
			       __func__, f->file_name, zone_idx);
			zbd_finish_zone(td, f, z);
			goto out;
		}

	pthread_mutex_unlock(&zbdi->mutex);
	dprint(FD_ZBD, "%s(%s): did not choose another write zone\n",
	       __func__, f->file_name);
	return NULL;

out:
	dprint(FD_ZBD, "%s(%s): returning zone %d\n",
	       __func__, f->file_name, zone_idx);

	io_u->offset = z->start;

	assert(z->cond != ZBD_ZONE_COND_OFFLINE);
/*
 * Find another zone which has @min_bytes of readable data. Search in zones
 * @zb + 1 .. @zl. For a random workload, also search in zones @zb - 1 .. @zf.
 *
 * Either returns NULL or returns a zone pointer. When the returned zone has a
 * write pointer, its mutex is held upon return.
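 *
 * Example: for a random workload with @zb at zone 5, the loop below visits
 * zones 6, 4, 7, 3, ... , fanning outward until a zone with enough readable
 * data is found.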
 */
static struct fio_zone_info *
zbd_find_zone(struct thread_data *td, struct io_u *io_u, uint64_t min_bytes,
	      struct fio_zone_info *zb, struct fio_zone_info *zl)
	struct fio_file *f = io_u->file;
	struct fio_zone_info *z1, *z2;
	const struct fio_zone_info *const zf = zbd_get_zone(f, f->min_zone);

	/*
	 * Skip to the next non-empty zone in case of sequential I/O and to
	 * the nearest non-empty zone in case of random I/O.
	 */
	for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
		if (z1 < zl && z1->cond != ZBD_ZONE_COND_OFFLINE) {
			if (z1->has_wp)
				zone_lock(td, f, z1);
			if (z1->start + min_bytes <= z1->wp)
				return z1;
			if (z1->has_wp)
				zone_unlock(z1);
		} else if (!td_random(td)) {
			break;
		}

		if (td_random(td) && z2 >= zf &&
		    z2->cond != ZBD_ZONE_COND_OFFLINE) {
			if (z2->has_wp)
				zone_lock(td, f, z2);
			if (z2->start + min_bytes <= z2->wp)
				return z2;
		}
	}

	dprint(FD_ZBD,
	       "%s: no zone has %"PRIu64" bytes of readable data\n",
	       f->file_name, min_bytes);
/**
 * zbd_end_zone_io - update zone status at command completion
 * @io_u: I/O unit
 * @z: zone info pointer
 *
 * If the write command made the zone full, remove it from the write target
 * zones array.
 *
 * The caller must hold z->mutex.
 */
static void zbd_end_zone_io(struct thread_data *td, const struct io_u *io_u,
			    struct fio_zone_info *z)
	const struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_WRITE &&
	    io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
		pthread_mutex_lock(&f->zbd_info->mutex);
		zbd_write_zone_put(td, f, z);
		pthread_mutex_unlock(&f->zbd_info->mutex);
	}
/**
 * zbd_queue_io - update the write pointer of a sequential zone
 * @io_u: I/O unit
 * @q: queueing status (busy, completed or queued).
 *
 * For write and trim operations, update the write pointer of the I/O unit
 * target zone. Success or failure is derived from io_u->error rather than
 * passed in as a parameter.
 */
static void zbd_queue_io(struct thread_data *td, struct io_u *io_u, int *q)
	const struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbd_info = f->zbd_info;
	bool success = io_u->error == 0;
	struct fio_zone_info *z;

	z = zbd_offset_to_zone(f, io_u->offset);

	if (!success && td->o.recover_zbd_write_error &&
	    io_u->ddir == DDIR_WRITE && td_ioengine_flagged(td, FIO_SYNCIO) &&
	    *q == FIO_Q_COMPLETED) {
		zbd_recover_write_error(td, io_u);

	dprint(FD_ZBD,
	       "%s: queued I/O (%lld, %llu) for zone %u\n",
	       f->file_name, io_u->offset, io_u->buflen, zbd_zone_idx(f, z));
	switch (io_u->ddir) {
	case DDIR_WRITE:
		zone_end = min((uint64_t)(io_u->offset + io_u->buflen),
			       zbd_zone_capacity_end(z));

		/*
		 * z->wp > zone_end means that one or more I/O errors
		 * have occurred.
		 */
		if (accounting_vdb(td, f) && z->wp <= zone_end) {
			pthread_mutex_lock(&zbd_info->mutex);
			zbd_info->wp_valid_data_bytes += zone_end - z->wp;
			pthread_mutex_unlock(&zbd_info->mutex);
		}

	if (*q == FIO_Q_COMPLETED && !io_u->error)
		zbd_end_zone_io(td, io_u, z);
	if (!success || *q != FIO_Q_QUEUED) {
		if (io_u->ddir == DDIR_WRITE) {
			z->writes_in_flight--;
			if (z->writes_in_flight == 0 && z->fixing_zone_wp) {
				dprint(FD_ZBD, "%s: Fixed write pointer of the zone %u\n",
				       f->file_name, zbd_zone_idx(f, z));
				z->fixing_zone_wp = 0;

		/* BUSY or COMPLETED: unlock the zone */
		zone_unlock(z);
		io_u->zbd_put_io = NULL;
/**
 * zbd_put_io - Unlock an I/O unit's target zone
 * @io_u: I/O unit.
 */
static void zbd_put_io(struct thread_data *td, const struct io_u *io_u)
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z;

	assert(f->zbd_info);

	z = zbd_offset_to_zone(f, io_u->offset);

	dprint(FD_ZBD,
	       "%s: terminate I/O (%lld, %llu) for zone %u\n",
	       f->file_name, io_u->offset, io_u->buflen, zbd_zone_idx(f, z));
	zbd_end_zone_io(td, io_u, z);

	if (io_u->ddir == DDIR_WRITE) {
		z->writes_in_flight--;
		if (z->writes_in_flight == 0 && z->fixing_zone_wp) {
			z->fixing_zone_wp = 0;
			dprint(FD_ZBD, "%s: Fixed write pointer of the zone %u\n",
			       f->file_name, zbd_zone_idx(f, z));
/*
 * Windows and MacOS do not define this.
 */
#ifndef EREMOTEIO
#define EREMOTEIO	121	/* POSIX value */
#endif

bool zbd_unaligned_write(int error_code)
	switch (error_code) {
/**
 * setup_zbd_zone_mode - handle zoneskip as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * For sequential workloads, change the file offset to skip zoneskip bytes when
 * no more I/O can be performed in the current zone.
 * - For read workloads, zoneskip is applied when the I/O has reached the end
 *   of the zone or the zone write position (when td->o.read_beyond_wp is
 *   false).
 * - For write workloads, zoneskip is applied when the zone is full.
 * This applies only to read and write operations.
 */
void setup_zbd_zone_mode(struct thread_data *td, struct io_u *io_u)
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	struct fio_zone_info *z;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);
	assert(td->o.zone_size);
	assert(f->zbd_info);

	z = zbd_offset_to_zone(f, f->last_pos[ddir]);
	/*
	 * When the zone capacity is smaller than the zone size and the I/O is
	 * sequential write, skip to zone end if the latest position is at the
	 * zone capacity limit.
	 */
	if (z->capacity < f->zbd_info->zone_size &&
	    !td_random(td) && ddir == DDIR_WRITE &&
	    f->last_pos[ddir] >= zbd_zone_capacity_end(z)) {
		dprint(FD_ZBD,
		       "%s: Jump from zone capacity limit to zone end:"
		       " (%"PRIu64" -> %"PRIu64") for zone %u (%"PRIu64")\n",
		       f->file_name, f->last_pos[ddir],
		       zbd_zone_end(z), zbd_zone_idx(f, z), z->capacity);
		td->io_skip_bytes += zbd_zone_end(z) - f->last_pos[ddir];
		f->last_pos[ddir] = zbd_zone_end(z);
	/*
	 * zone_skip is valid only for sequential workloads.
	 */
	if (td_random(td) || !td->o.zone_skip)
		return;
	/*
	 * It is time to switch to a new zone if:
	 * - zone_bytes == zone_size bytes have already been accessed
	 * - The last position reached the end of the current zone.
	 * - For reads with td->o.read_beyond_wp == false, the last position
	 *   reached the zone write pointer.
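	 *
	 * Example: with zone_size=256M and zone_skip=256M, a sequential job
	 * advances file_offset by zone_size + zone_skip when switching zones,
	 * i.e. it touches every other zone.
	 */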
	if (td->zone_bytes >= td->o.zone_size ||
	    f->last_pos[ddir] >= zbd_zone_end(z) ||
	    (ddir == DDIR_READ &&
	     (!td->o.read_beyond_wp) && f->last_pos[ddir] >= z->wp)) {
		td->zone_bytes = 0;
		f->file_offset += td->o.zone_size + td->o.zone_skip;

		/*
		 * Wrap from the beginning if we exceed the file size.
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = get_start_offset(td, f);

		f->last_pos[ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
/**
 * zbd_adjust_ddir - Adjust an I/O direction for zonemode=zbd.
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 * @ddir: I/O direction before adjustment.
 *
 * Return adjusted I/O direction.
 */
enum fio_ddir zbd_adjust_ddir(struct thread_data *td, struct io_u *io_u,
			      enum fio_ddir ddir)
	/*
	 * In case read direction is chosen for the first random I/O, fio with
	 * zonemode=zbd stops because no data can be read from zoned block
	 * devices with all empty zones. Override the first I/O direction to
	 * be a write, to ensure that data exists to be read.
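	 *
	 * Example: a rw=randrw job whose very first random direction comes up
	 * as a read is redirected to a write here, since all zones may still
	 * be empty and a read would find no data.
	 */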
	assert(io_u->file->zbd_info);

	if (ddir != DDIR_READ || !td_rw(td))
		return ddir;

	if (io_u->file->last_start[DDIR_WRITE] != -1ULL ||
	    td->o.read_beyond_wp || td->o.rwmix[DDIR_WRITE] == 0)
		return ddir;

	return DDIR_WRITE;
/**
 * zbd_adjust_block - adjust the offset and length as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * Locking strategy: returns with z->mutex locked if and only if z refers
 * to a sequential zone and if io_u_accept is returned. z is the zone that
 * corresponds to io_u->offset at the end of this function.
 */
enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
	struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbdi = f->zbd_info;
	struct fio_zone_info *zb, *zl, *orig_zb;
	uint32_t orig_len = io_u->buflen;
	uint64_t min_bs = td->o.min_bs[io_u->ddir];

	assert(is_valid_offset(f, io_u->offset));
	assert(io_u->buflen);

	zb = zbd_offset_to_zone(f, io_u->offset);
	/* Accept non-write I/Os for conventional zones. */
	if (io_u->ddir != DDIR_WRITE)
		return io_u_accept;

	/*
	 * Make sure that writes to conventional zones
	 * don't cross over to any sequential zones.
	 */
	if (!(zb + 1)->has_wp ||
	    io_u->offset + io_u->buflen <= (zb + 1)->start)
		return io_u_accept;

	if (io_u->offset + min_bs > (zb + 1)->start) {
		dprint(FD_IO,
		       "%s: off=%llu + min_bs=%"PRIu64" > next zone %"PRIu64"\n",
		       f->file_name, io_u->offset,
		       min_bs, (zb + 1)->start);
		io_u->offset =
			zb->start + (zb + 1)->start - io_u->offset;
		new_len = min(io_u->buflen,
			      (zb + 1)->start - io_u->offset);
	} else {
		new_len = (zb + 1)->start - io_u->offset;
	}

	io_u->buflen = new_len / min_bs * min_bs;
	/*
	 * Accept the I/O offset for reads if reading beyond the write pointer
	 * is enabled.
	 */
	if (zb->cond != ZBD_ZONE_COND_OFFLINE &&
	    io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
		return io_u_accept;

	zone_lock(td, f, zb);

	if (!td_ioengine_flagged(td, FIO_SYNCIO) && zb->fixing_zone_wp) {
	switch (io_u->ddir) {
	case DDIR_READ:
		if (td->runstate == TD_VERIFYING && td_write(td))
		/*
		 * Check that there is enough written data in the zone to do an
		 * I/O of at least min_bs B. If there isn't, find a new zone
		 * for the I/O.
		 */
		range = zb->cond != ZBD_ZONE_COND_OFFLINE ?
			zb->wp - zb->start : 0;
		if (range < min_bs ||
		    ((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
			zone_unlock(zb);
			zl = zbd_get_zone(f, f->max_zone);
			zb = zbd_find_zone(td, io_u, min_bs, zb, zl);
			if (!zb) {
				dprint(FD_ZBD,
				       "%s: zbd_find_zone(%lld, %llu) failed\n",
				       f->file_name, io_u->offset,
				       io_u->buflen);
				goto eof;
			}

			/*
			 * zbd_find_zone() returned a zone with a range of at
			 * least min_bs.
			 */
			range = zb->wp - zb->start;
			assert(range >= min_bs);

			if (!td_random(td))
				io_u->offset = zb->start;
		}
		/*
		 * Make sure the I/O is within the zone valid data range while
		 * maximizing the I/O size and preserving randomness.
		 */
		if (range <= io_u->buflen)
			io_u->offset = zb->start;
		else if (td_random(td))
			io_u->offset = zb->start +
				((io_u->offset - orig_zb->start) %
				(range - io_u->buflen)) / min_bs * min_bs;

		/*
		 * When zbd_find_zone() returns a conventional zone,
		 * we can simply accept the new I/O offset here.
		 */
		/*
		 * Make sure the I/O does not cross over the zone wp position.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      (unsigned long long)(zb->wp - io_u->offset));
		new_len = new_len / min_bs * min_bs;
		if (new_len < io_u->buflen) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u into %llu\n",
			       orig_len, io_u->buflen);
		}

		assert(zb->start <= io_u->offset);
		assert(io_u->offset + io_u->buflen <= zb->wp);
		return io_u_accept;
	case DDIR_WRITE:
		if (io_u->buflen > zbdi->zone_size) {
			td_verror(td, EINVAL, "I/O buflen exceeds zone size");
			dprint(FD_IO,
			       "%s: I/O buflen %llu exceeds zone size %"PRIu64"\n",
			       f->file_name, io_u->buflen, zbdi->zone_size);
			goto eof;
		}
		zb = zbd_convert_to_write_zone(td, io_u, zb);
		if (!zb) {
			dprint(FD_IO, "%s: can't convert to write target zone",
			       f->file_name);
			goto eof;
		}

		if (zbd_zone_remainder(zb) > 0 &&
		    zbd_zone_remainder(zb) < min_bs)
		/* Check whether the zone reset threshold has been exceeded */
		if (td->o.zrf.u.f) {
			if (zbdi->wp_valid_data_bytes >=
			    f->io_size * td->o.zrt.u.f &&
			    zbd_dec_and_reset_write_cnt(td, f))
				zb->reset_zone = 1;
		}
		/* Reset the zone pointer if necessary */
		if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
			if (td->o.verify != VERIFY_NONE) {
				/*
				 * Unset io_u->file to tell get_next_verify()
				 * that this I/O is not a requeue.
				 */
				io_u->file = NULL;
				if (!get_next_verify(td, io_u)) {
			/*
			 * Since previous write requests may have been submitted
			 * asynchronously and since we will submit the zone
			 * reset synchronously, wait until previously submitted
			 * write requests have completed before issuing a
			 * zone reset.
			 */
			io_u_quiesce(td);
			zb->reset_zone = 0;
			if (__zbd_reset_zone(td, f, zb) < 0)
				goto eof;
			if (zb->capacity < min_bs) {
				td_verror(td, EINVAL, "ZCAP is less than min_bs");
				log_err("zone capacity %"PRIu64" smaller than minimum block size %"PRIu64"\n",
					zb->capacity, min_bs);
				goto eof;
			}
		}
		/* Make writes occur at the write pointer */
		assert(!zbd_zone_full(f, zb, min_bs));
		io_u->offset = zb->wp;
		if (!is_valid_offset(f, io_u->offset)) {
			td_verror(td, EINVAL, "invalid WP value");
			dprint(FD_ZBD, "%s: dropped request with offset %llu\n",
			       f->file_name, io_u->offset);
			goto eof;
		}
		/*
		 * Make sure that the buflen is a multiple of the minimal
		 * block size. Give up if shrinking would make the request too
		 * small.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      zbd_zone_capacity_end(zb) - io_u->offset);
		new_len = new_len / min_bs * min_bs;
		if (new_len == io_u->buflen)
			goto accept;
		if (new_len >= min_bs) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u into %llu\n",
			       orig_len, io_u->buflen);
			goto accept;
		}

		td_verror(td, EIO, "zone remainder too small");
		log_err("zone remainder %lld smaller than min block size %"PRIu64"\n",
			(zbd_zone_capacity_end(zb) - io_u->offset), min_bs);

		goto eof;
	case DDIR_TRIM:
		/* Check that a random trim targets a non-empty zone */
		if (!td_random(td) || zb->wp > zb->start)
			return io_u_accept;

		/* Find a non-empty zone to trim */
		zone_unlock(zb);
		zl = zbd_get_zone(f, f->max_zone);
		zb = zbd_find_zone(td, io_u, 1, zb, zl);
		if (zb) {
			io_u->offset = zb->start;
			dprint(FD_ZBD, "%s: found new zone(%lld) for trim\n",
			       f->file_name, io_u->offset);
			return io_u_accept;
		}
	case DDIR_SYNC_FILE_RANGE:
		return io_u_accept;

accept:
	assert(zb->cond != ZBD_ZONE_COND_OFFLINE);
	assert(!io_u->zbd_queue_io);
	assert(!io_u->zbd_put_io);

	io_u->zbd_queue_io = zbd_queue_io;
	io_u->zbd_put_io = zbd_put_io;

	if (io_u->ddir == DDIR_WRITE)
		zb->writes_in_flight++;
	/*
	 * Since we return with the zone lock still held,
	 * add an annotation to let Coverity know that it
	 * is intentional.
	 */
	/* coverity[missing_unlock] */
	return io_u_accept;
eof:
	if (zb && zb->has_wp)
		zone_unlock(zb);
/* Return a string with ZBD statistics */
char *zbd_write_status(const struct thread_stat *ts)
	if (asprintf(&res, "; %"PRIu64" zone resets", ts->nr_zone_resets) < 0)
/**
 * zbd_do_io_u_trim - If reset zone is applicable, do reset zone instead of trim
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * It is assumed that z->mutex is already locked.
 * Returns io_u_completed when the zone reset succeeds. Returns 0 when the
 * target zone does not have a write pointer. On error, returns a negative
 * errno.
 */
int zbd_do_io_u_trim(struct thread_data *td, struct io_u *io_u)
	struct fio_file *f = io_u->file;
	struct fio_zone_info *z;

	z = zbd_offset_to_zone(f, io_u->offset);
	if (!z->has_wp)
		return 0;
	if (io_u->offset != z->start) {
		log_err("Trim offset not at zone start (%lld)\n",
			io_u->offset);
		return -EINVAL;
	}

	ret = zbd_reset_zone(td, f, z);
	if (ret < 0)
		return ret;

	return io_u_completed;
void zbd_log_err(const struct thread_data *td, const struct io_u *io_u)
	const struct fio_file *f = io_u->file;

	if (td->o.zone_mode != ZONE_MODE_ZBD)
		return;

	if (io_u->error == EOVERFLOW)
		log_err("%s: Exceeded max_active_zones limit. Check conditions of zones out of I/O ranges.\n",
			f->file_name);
void zbd_recover_write_error(struct thread_data *td, struct io_u *io_u)
	struct fio_file *f = io_u->file;
	struct fio_zone_info *z;
	struct zbd_zone zrep;
	unsigned long long retry_offset;
	unsigned long long retry_len;
	char *retry_buf;
	uint64_t write_end_offset;

	z = zbd_offset_to_zone(f, io_u->offset);
	write_end_offset = io_u->offset + io_u->buflen - z->start;
	assert(z->writes_in_flight);

	if (!z->fixing_zone_wp) {
		z->fixing_zone_wp = 1;
		dprint(FD_ZBD, "%s: Start fixing zone %u write pointer\n",
		       f->file_name, zbd_zone_idx(f, z));
	}

	if (z->max_write_error_offset < write_end_offset)
		z->max_write_error_offset = write_end_offset;
	if (z->writes_in_flight > 1)
		return;
	/*
	 * This was the last in-flight write to the zone since the write error
	 * occurred. Get the zone's current write pointer and recover its
	 * position so that the next write can continue.
	 */
	ret = zbd_report_zones(td, f, z->start, &zrep, 1);
	if (ret != 1) {
		log_info("fio: Report zone for write recovery failed for %s\n",
			 f->file_name);
		return;
	}

	if (zrep.wp < z->start ||
	    z->start + z->max_write_error_offset < zrep.wp) {
		log_info("fio: unexpected write pointer position on error for %s: wp=%"PRIu64"\n",
			 f->file_name, zrep.wp);
		return;
	}
	retry_offset = zrep.wp;
	retry_len = z->start + z->max_write_error_offset - retry_offset;
	retry_buf = NULL;
	if (retry_offset >= io_u->offset)
		retry_buf = (char *)io_u->buf + (retry_offset - io_u->offset);

	ret = zbd_move_zone_wp(td, io_u->file, &zrep, retry_len, retry_buf);
	if (ret) {
		log_info("fio: Failed to recover write pointer for %s\n",
			 f->file_name);
		return;
	}

	z->wp = retry_offset + retry_len;

	dprint(FD_ZBD, "%s: Write pointer move succeeded for error=%d\n",
	       f->file_name, io_u->error);