zbd: remove is_zone_open() helper
[fio.git] / zbd.c

/*
 * Copyright (C) 2018 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

#include "os/os.h"
#include "file.h"
#include "fio.h"
#include "lib/pow2.h"
#include "log.h"
#include "oslib/asprintf.h"
#include "smalloc.h"
#include "verify.h"
#include "pshared.h"
#include "zbd.h"

static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
{
	return (uint64_t)(offset - f->file_offset) < f->io_size;
}

static inline unsigned int zbd_zone_nr(const struct fio_file *f,
				       struct fio_zone_info *zone)
{
	return zone - f->zbd_info->zone_info;
}

/**
 * zbd_zone_idx - convert an offset into a zone number
 * @f: file pointer.
 * @offset: offset in bytes. If this offset is in the first zone_size bytes
 *	    past the disk size then the index of the sentinel is returned.
 */
static uint32_t zbd_zone_idx(const struct fio_file *f, uint64_t offset)
{
	uint32_t zone_idx;

	if (f->zbd_info->zone_size_log2 > 0)
		zone_idx = offset >> f->zbd_info->zone_size_log2;
	else
		zone_idx = offset / f->zbd_info->zone_size;

	return min(zone_idx, f->zbd_info->nr_zones);
}

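/*
 * Worked example (illustrative): with a 256 MiB zone size, which is a power
 * of two so the shift path above is taken, an offset of 1 GiB maps to zone
 * index 4. Offsets at or beyond the device capacity clamp to nr_zones, the
 * index of the sentinel entry.
 */
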
/**
 * zbd_zone_end - Return zone end location
 * @z: zone info pointer.
 */
static inline uint64_t zbd_zone_end(const struct fio_zone_info *z)
{
	return (z+1)->start;
}

/**
 * zbd_zone_capacity_end - Return zone capacity limit end location
 * @z: zone info pointer.
 */
static inline uint64_t zbd_zone_capacity_end(const struct fio_zone_info *z)
{
	return z->start + z->capacity;
}

/**
 * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
 * @f: file pointer.
 * @z: zone info pointer.
 * @required: minimum number of bytes that must remain in a zone.
 *
 * The caller must hold z->mutex.
 */
static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
			  uint64_t required)
{
	assert((required & 511) == 0);

	return z->has_wp &&
		z->wp + required > zbd_zone_capacity_end(z);
}

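/*
 * Note: on zoned devices such as NVMe ZNS drives, the writable capacity of a
 * zone can be smaller than the zone size, which is why fullness is checked
 * against the zone capacity end rather than the zone end.
 */
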
static void zone_lock(struct thread_data *td, const struct fio_file *f,
		      struct fio_zone_info *z)
{
	struct zoned_block_device_info *zbd = f->zbd_info;
	uint32_t nz = z - zbd->zone_info;

	/* A thread should never lock zones outside its working area. */
	assert(f->min_zone <= nz && nz < f->max_zone);

	assert(z->has_wp);

	/*
	 * Lock the io_u target zone. The zone will be unlocked if io_u offset
	 * is changed or when io_u completes and zbd_put_io() is executed.
	 * To avoid multiple jobs doing asynchronous I/Os from deadlocking each
	 * other waiting for zone locks when building an io_u batch, first
	 * only trylock the zone. If the zone is already locked by another job,
	 * process the currently queued I/Os so that I/O progress is made and
	 * zones unlocked.
	 */
	if (pthread_mutex_trylock(&z->mutex) != 0) {
		if (!td_ioengine_flagged(td, FIO_SYNCIO))
			io_u_quiesce(td);
		pthread_mutex_lock(&z->mutex);
	}
}

static inline void zone_unlock(struct fio_zone_info *z)
{
	int ret;

	assert(z->has_wp);
	ret = pthread_mutex_unlock(&z->mutex);
	assert(!ret);
}

static inline struct fio_zone_info *get_zone(const struct fio_file *f,
					     unsigned int zone_nr)
{
	return &f->zbd_info->zone_info[zone_nr];
}

/**
 * zbd_get_zoned_model - Get a device zoned model
 * @td: FIO thread data
 * @f: FIO file for which to get model information
 */
static int zbd_get_zoned_model(struct thread_data *td, struct fio_file *f,
			       enum zbd_zoned_model *model)
{
	int ret;

	if (f->filetype == FIO_TYPE_PIPE) {
		log_err("zonemode=zbd does not support pipes\n");
		return -EINVAL;
	}

	/* If regular file, always emulate zones inside the file. */
	if (f->filetype == FIO_TYPE_FILE) {
		*model = ZBD_NONE;
		return 0;
	}

	if (td->io_ops && td->io_ops->get_zoned_model)
		ret = td->io_ops->get_zoned_model(td, f, model);
	else
		ret = blkzoned_get_zoned_model(td, f, model);
	if (ret < 0) {
		td_verror(td, errno, "get zoned model failed");
		log_err("%s: get zoned model failed (%d).\n",
			f->file_name, errno);
	}

	return ret;
}

/**
 * zbd_report_zones - Get zone information
 * @td: FIO thread data.
 * @f: FIO file for which to get zone information
 * @offset: offset from which to report zones
 * @zones: Array of struct zbd_zone
 * @nr_zones: Size of @zones array
 *
 * Get zone information into @zones starting from the zone at offset @offset
 * for the device specified by @f.
 *
 * Returns the number of zones reported upon success and a negative error code
 * upon failure. If the zone report is empty, always assume an error (device
 * problem) and return -EIO.
 */
static int zbd_report_zones(struct thread_data *td, struct fio_file *f,
			    uint64_t offset, struct zbd_zone *zones,
			    unsigned int nr_zones)
{
	int ret;

	if (td->io_ops && td->io_ops->report_zones)
		ret = td->io_ops->report_zones(td, f, offset, zones, nr_zones);
	else
		ret = blkzoned_report_zones(td, f, offset, zones, nr_zones);
	if (ret < 0) {
		td_verror(td, errno, "report zones failed");
		log_err("%s: report zones from sector %"PRIu64" failed (%d).\n",
			f->file_name, offset >> 9, errno);
	} else if (ret == 0) {
		td_verror(td, errno, "Empty zone report");
		log_err("%s: report zones from sector %"PRIu64" is empty.\n",
			f->file_name, offset >> 9);
		ret = -EIO;
	}

	return ret;
}

/**
 * zbd_reset_wp - reset the write pointer of a range of zones
 * @td: FIO thread data.
 * @f: FIO file for which to reset zones
 * @offset: Starting offset of the first zone to reset
 * @length: Length of the range of zones to reset
 *
 * Reset the write pointer of all zones in the range @offset...@offset+@length.
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_wp(struct thread_data *td, struct fio_file *f,
			uint64_t offset, uint64_t length)
{
	int ret;

	if (td->io_ops && td->io_ops->reset_wp)
		ret = td->io_ops->reset_wp(td, f, offset, length);
	else
		ret = blkzoned_reset_wp(td, f, offset, length);
	if (ret < 0) {
		td_verror(td, errno, "resetting wp failed");
		log_err("%s: resetting wp for %"PRIu64" sectors at sector %"PRIu64" failed (%d).\n",
			f->file_name, length >> 9, offset >> 9, errno);
	}

	return ret;
}

/**
 * zbd_reset_zone - reset the write pointer of a single zone
 * @td: FIO thread data.
 * @f: FIO file associated with the disk for which to reset a write pointer.
 * @z: Zone to reset.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * The caller must hold z->mutex.
 */
static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
			  struct fio_zone_info *z)
{
	uint64_t offset = z->start;
	uint64_t length = (z+1)->start - offset;
	uint64_t data_in_zone = z->wp - z->start;
	int ret = 0;

	if (!data_in_zone)
		return 0;

	assert(is_valid_offset(f, offset + length - 1));

	dprint(FD_ZBD, "%s: resetting wp of zone %u.\n", f->file_name,
	       zbd_zone_nr(f, z));
	switch (f->zbd_info->model) {
	case ZBD_HOST_AWARE:
	case ZBD_HOST_MANAGED:
		ret = zbd_reset_wp(td, f, offset, length);
		if (ret < 0)
			return ret;
		break;
	default:
		break;
	}

	pthread_mutex_lock(&f->zbd_info->mutex);
	f->zbd_info->sectors_with_data -= data_in_zone;
	f->zbd_info->wp_sectors_with_data -= data_in_zone;
	pthread_mutex_unlock(&f->zbd_info->mutex);
	z->wp = z->start;
	z->verify_block = 0;

	td->ts.nr_zone_resets++;

	return ret;
}

/**
 * zbd_close_zone - Remove a zone from the open zones array.
 * @td: FIO thread data.
 * @f: FIO file associated with the disk for which to reset a write pointer.
 * @zone_idx: Index of the zone to remove.
 *
 * The caller must hold f->zbd_info->mutex.
 */
static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
			   unsigned int zone_idx)
{
	uint32_t open_zone_idx = 0;

	for (; open_zone_idx < f->zbd_info->num_open_zones; open_zone_idx++) {
		if (f->zbd_info->open_zones[open_zone_idx] == zone_idx)
			break;
	}
	if (open_zone_idx == f->zbd_info->num_open_zones)
		return;

	dprint(FD_ZBD, "%s: closing zone %d\n", f->file_name, zone_idx);
	memmove(f->zbd_info->open_zones + open_zone_idx,
		f->zbd_info->open_zones + open_zone_idx + 1,
		(ZBD_MAX_OPEN_ZONES - (open_zone_idx + 1)) *
		sizeof(f->zbd_info->open_zones[0]));
	f->zbd_info->num_open_zones--;
	td->num_open_zones--;
	get_zone(f, zone_idx)->open = 0;
}

/**
 * zbd_reset_zones - Reset a range of zones.
 * @td: fio thread data.
 * @f: fio file for which to reset zones
 * @zb: first zone to reset.
 * @ze: first zone not to reset.
 *
 * Returns 0 upon success and 1 upon failure.
 */
static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
			   struct fio_zone_info *const zb,
			   struct fio_zone_info *const ze)
{
	struct fio_zone_info *z;
	const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
	int res = 0;

	assert(min_bs);

	dprint(FD_ZBD, "%s: examining zones %u .. %u\n", f->file_name,
	       zbd_zone_nr(f, zb), zbd_zone_nr(f, ze));
	for (z = zb; z < ze; z++) {
		uint32_t nz = zbd_zone_nr(f, z);

		if (!z->has_wp)
			continue;
		zone_lock(td, f, z);
		pthread_mutex_lock(&f->zbd_info->mutex);
		zbd_close_zone(td, f, nz);
		pthread_mutex_unlock(&f->zbd_info->mutex);
		if (z->wp != z->start) {
			dprint(FD_ZBD, "%s: resetting zone %u\n",
			       f->file_name, zbd_zone_nr(f, z));
			if (zbd_reset_zone(td, f, z) < 0)
				res = 1;
		}
		zone_unlock(z);
	}

	return res;
}

/**
 * zbd_get_max_open_zones - Get the maximum number of open zones
 * @td: FIO thread data
 * @f: FIO file for which to get max open zones
 * @max_open_zones: Upon success, result will be stored here.
 *
 * A @max_open_zones value set to zero means no limit.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_get_max_open_zones(struct thread_data *td, struct fio_file *f,
				  unsigned int *max_open_zones)
{
	int ret;

	if (td->io_ops && td->io_ops->get_max_open_zones)
		ret = td->io_ops->get_max_open_zones(td, f, max_open_zones);
	else
		ret = blkzoned_get_max_open_zones(td, f, max_open_zones);
	if (ret < 0) {
		td_verror(td, errno, "get max open zones failed");
		log_err("%s: get max open zones failed (%d).\n",
			f->file_name, errno);
	}

	return ret;
}

/**
 * zbd_open_zone - Add a zone to the array of open zones.
 * @td: fio thread data.
 * @f: fio file that has the open zones to add.
 * @zone_idx: Index of the zone to add.
 *
 * Open a ZBD zone if it is not already open. Returns true if either the zone
 * was already open or if the zone was successfully added to the array of open
 * zones without exceeding the maximum number of open zones. Returns false if
 * the zone was not already open and opening the zone would cause the zone limit
 * to be exceeded.
 */
static bool zbd_open_zone(struct thread_data *td, const struct fio_file *f,
			  uint32_t zone_idx)
{
	const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];
	struct zoned_block_device_info *zbdi = f->zbd_info;
	struct fio_zone_info *z = get_zone(f, zone_idx);
	bool res = true;

	if (z->cond == ZBD_ZONE_COND_OFFLINE)
		return false;

	/*
	 * Skip full zones with data verification enabled because resetting a
	 * zone causes data loss and hence causes verification to fail.
	 */
	if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
		return false;

	/*
	 * zbdi->max_open_zones == 0 means that there is no limit on the
	 * maximum number of open zones. In this case, do not track open zones
	 * in the zbdi->open_zones array.
	 */
	if (!zbdi->max_open_zones)
		return true;

	pthread_mutex_lock(&zbdi->mutex);

	if (z->open) {
		/*
		 * If the zone is going to be completely filled by writes
		 * already in-flight, handle it as a full zone instead of an
		 * open zone.
		 */
		if (z->wp >= zbd_zone_capacity_end(z))
			res = false;
		goto out;
	}
	res = false;
	/* Zero means no limit */
	if (td->o.job_max_open_zones > 0 &&
	    td->num_open_zones >= td->o.job_max_open_zones)
		goto out;
	if (zbdi->num_open_zones >= zbdi->max_open_zones)
		goto out;
	dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
	zbdi->open_zones[zbdi->num_open_zones++] = zone_idx;
	td->num_open_zones++;
	z->open = 1;
	res = true;

out:
	pthread_mutex_unlock(&zbdi->mutex);
	return res;
}

/* Verify whether direct I/O is used for all host-managed zoned drives. */
static bool zbd_using_direct_io(void)
{
	struct thread_data *td;
	struct fio_file *f;
	int i, j;

	for_each_td(td, i) {
		if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
			continue;
		for_each_file(td, f, j) {
			if (f->zbd_info &&
			    f->zbd_info->model == ZBD_HOST_MANAGED)
				return false;
		}
	}

	return true;
}

/* Whether or not the I/O range for f includes one or more sequential zones */
static bool zbd_is_seq_job(struct fio_file *f)
{
	uint32_t zone_idx, zone_idx_b, zone_idx_e;

	assert(f->zbd_info);
	if (f->io_size == 0)
		return false;
	zone_idx_b = zbd_zone_idx(f, f->file_offset);
	zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size - 1);
	for (zone_idx = zone_idx_b; zone_idx <= zone_idx_e; zone_idx++)
		if (get_zone(f, zone_idx)->has_wp)
			return true;

	return false;
}

/*
 * Verify whether offset and size parameters are aligned with zone boundaries.
 */
static bool zbd_verify_sizes(void)
{
	const struct fio_zone_info *z;
	struct thread_data *td;
	struct fio_file *f;
	uint64_t new_offset, new_end;
	uint32_t zone_idx;
	int i, j;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			if (!f->zbd_info)
				continue;
			if (f->file_offset >= f->real_file_size)
				continue;
			if (!zbd_is_seq_job(f))
				continue;

			if (!td->o.zone_size) {
				td->o.zone_size = f->zbd_info->zone_size;
				if (!td->o.zone_size) {
					log_err("%s: invalid 0 zone size\n",
						f->file_name);
					return false;
				}
			} else if (td->o.zone_size != f->zbd_info->zone_size) {
				log_err("%s: job parameter zonesize %llu does not match disk zone size %"PRIu64".\n",
					f->file_name, td->o.zone_size,
					f->zbd_info->zone_size);
				return false;
			}

			if (td->o.zone_skip % td->o.zone_size) {
				log_err("%s: zoneskip %llu is not a multiple of the device zone size %llu.\n",
					f->file_name, td->o.zone_skip,
					td->o.zone_size);
				return false;
			}

			zone_idx = zbd_zone_idx(f, f->file_offset);
			z = get_zone(f, zone_idx);
			if ((f->file_offset != z->start) &&
			    (td->o.td_ddir != TD_DDIR_READ)) {
				new_offset = zbd_zone_end(z);
				if (new_offset >= f->file_offset + f->io_size) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded up offset from %"PRIu64" to %"PRIu64"\n",
					 f->file_name, f->file_offset,
					 new_offset);
				f->io_size -= (new_offset - f->file_offset);
				f->file_offset = new_offset;
			}
			zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
			z = get_zone(f, zone_idx);
			new_end = z->start;
			if ((td->o.td_ddir != TD_DDIR_READ) &&
			    (f->file_offset + f->io_size != new_end)) {
				if (new_end <= f->file_offset) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded down io_size from %"PRIu64" to %"PRIu64"\n",
					 f->file_name, f->io_size,
					 new_end - f->file_offset);
				f->io_size = new_end - f->file_offset;
			}
		}
	}

	return true;
}

static bool zbd_verify_bs(void)
{
	struct thread_data *td;
	struct fio_file *f;
	int i, j, k;

	for_each_td(td, i) {
		if (td_trim(td) &&
		    (td->o.min_bs[DDIR_TRIM] != td->o.max_bs[DDIR_TRIM] ||
		     td->o.bssplit_nr[DDIR_TRIM])) {
			log_info("bsrange and bssplit are not allowed for trim with zonemode=zbd\n");
			return false;
		}
		for_each_file(td, f, j) {
			uint64_t zone_size;

			if (!f->zbd_info)
				continue;
			zone_size = f->zbd_info->zone_size;
			if (td_trim(td) && td->o.bs[DDIR_TRIM] != zone_size) {
				log_info("%s: trim block size %llu is not the zone size %"PRIu64"\n",
					 f->file_name, td->o.bs[DDIR_TRIM],
					 zone_size);
				return false;
			}
			for (k = 0; k < FIO_ARRAY_SIZE(td->o.bs); k++) {
				if (td->o.verify != VERIFY_NONE &&
				    zone_size % td->o.bs[k] != 0) {
					log_info("%s: block size %llu is not a divisor of the zone size %"PRIu64"\n",
						 f->file_name, td->o.bs[k],
						 zone_size);
					return false;
				}
			}
		}
	}
	return true;
}

static int ilog2(uint64_t i)
{
	int log = -1;

	while (i) {
		i >>= 1;
		log++;
	}
	return log;
}

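/*
 * Examples: ilog2(1) == 0, ilog2(65536) == 16. Note that ilog2(0) returns
 * -1; in this file the function is only reached for zone sizes that passed
 * is_power_of_2(), which excludes zero.
 */
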
/*
 * Initialize f->zbd_info for devices that are not zoned block devices. This
 * allows executing a ZBD workload against a non-ZBD device.
 */
static int init_zone_info(struct thread_data *td, struct fio_file *f)
{
	uint32_t nr_zones;
	struct fio_zone_info *p;
	uint64_t zone_size = td->o.zone_size;
	uint64_t zone_capacity = td->o.zone_capacity;
	struct zoned_block_device_info *zbd_info = NULL;
	int i;

	if (zone_size == 0) {
		log_err("%s: Specifying the zone size is mandatory for regular file/block device with --zonemode=zbd\n\n",
			f->file_name);
		return 1;
	}

	if (zone_size < 512) {
		log_err("%s: zone size must be at least 512 bytes for --zonemode=zbd\n\n",
			f->file_name);
		return 1;
	}

	if (zone_capacity == 0)
		zone_capacity = zone_size;

	if (zone_capacity > zone_size) {
		log_err("%s: job parameter zonecapacity %llu is larger than zone size %llu\n",
			f->file_name, td->o.zone_capacity, td->o.zone_size);
		return 1;
	}

	if (f->real_file_size < zone_size) {
		log_err("%s: file/device size %"PRIu64" is smaller than zone size %"PRIu64"\n",
			f->file_name, f->real_file_size, zone_size);
		return -EINVAL;
	}

	nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	if (!zbd_info)
		return -ENOMEM;

	mutex_init_pshared(&zbd_info->mutex);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (i = 0; i < nr_zones; i++, p++) {
		mutex_init_pshared_with_type(&p->mutex,
					     PTHREAD_MUTEX_RECURSIVE);
		p->start = i * zone_size;
		p->wp = p->start;
		p->type = ZBD_ZONE_TYPE_SWR;
		p->cond = ZBD_ZONE_COND_EMPTY;
		p->capacity = zone_capacity;
		p->has_wp = 1;
	}
	/* a sentinel */
	p->start = nr_zones * zone_size;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) : 0;
	f->zbd_info->nr_zones = nr_zones;
	return 0;
}

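/*
 * Illustrative job snippet exercising the emulation path above (option names
 * as documented for fio's zonemode=zbd support; file name and values are
 * hypothetical):
 *
 *	[emulated-zbd]
 *	filename=/tmp/zbd-testfile	; regular file => model ZBD_NONE
 *	zonemode=zbd
 *	zonesize=64M			; mandatory for non-zoned files
 *	zonecapacity=48M		; optional, defaults to zonesize
 *	rw=write
 *	direct=1
 */
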
/*
 * Maximum number of zones to report in one operation.
 */
#define ZBD_REPORT_MAX_ZONES	8192U

/*
 * Parse the device zone report and store it in f->zbd_info. Must be called
 * only for devices that are zoned, namely those with a model != ZBD_NONE.
 */
static int parse_zone_info(struct thread_data *td, struct fio_file *f)
{
	int nr_zones, nrz;
	struct zbd_zone *zones, *z;
	struct fio_zone_info *p;
	uint64_t zone_size, offset;
	struct zoned_block_device_info *zbd_info = NULL;
	int i, j, ret = -ENOMEM;

	zones = calloc(ZBD_REPORT_MAX_ZONES, sizeof(struct zbd_zone));
	if (!zones)
		goto out;

	nrz = zbd_report_zones(td, f, 0, zones, ZBD_REPORT_MAX_ZONES);
	if (nrz < 0) {
		ret = nrz;
		log_info("fio: report zones (offset 0) failed for %s (%d).\n",
			 f->file_name, -ret);
		goto out;
	}

	zone_size = zones[0].len;
	nr_zones = (f->real_file_size + zone_size - 1) / zone_size;

	if (td->o.zone_size == 0) {
		td->o.zone_size = zone_size;
	} else if (td->o.zone_size != zone_size) {
		log_err("fio: %s job parameter zonesize %llu does not match disk zone size %"PRIu64".\n",
			f->file_name, td->o.zone_size, zone_size);
		ret = -EINVAL;
		goto out;
	}

	dprint(FD_ZBD, "Device %s has %d zones of size %"PRIu64" KB\n", f->file_name,
	       nr_zones, zone_size / 1024);

	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	if (!zbd_info)
		goto out;
	mutex_init_pshared(&zbd_info->mutex);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (offset = 0, j = 0; j < nr_zones;) {
		z = &zones[0];
		for (i = 0; i < nrz; i++, j++, z++, p++) {
			mutex_init_pshared_with_type(&p->mutex,
						     PTHREAD_MUTEX_RECURSIVE);
			p->start = z->start;
			p->capacity = z->capacity;
			switch (z->cond) {
			case ZBD_ZONE_COND_NOT_WP:
			case ZBD_ZONE_COND_FULL:
				p->wp = p->start + p->capacity;
				break;
			default:
				assert(z->start <= z->wp);
				assert(z->wp <= z->start + zone_size);
				p->wp = z->wp;
				break;
			}

			switch (z->type) {
			case ZBD_ZONE_TYPE_SWR:
				p->has_wp = 1;
				break;
			default:
				p->has_wp = 0;
			}
			p->type = z->type;
			p->cond = z->cond;

			if (j > 0 && p->start != p[-1].start + zone_size) {
				log_info("%s: invalid zone data\n",
					 f->file_name);
				ret = -EINVAL;
				goto out;
			}
		}
		z--;
		offset = z->start + z->len;
		if (j >= nr_zones)
			break;
		nrz = zbd_report_zones(td, f, offset, zones,
				       min((uint32_t)(nr_zones - j),
					   ZBD_REPORT_MAX_ZONES));
		if (nrz < 0) {
			ret = nrz;
			log_info("fio: report zones (offset %"PRIu64") failed for %s (%d).\n",
				 offset, f->file_name, -ret);
			goto out;
		}
	}

	/* a sentinel */
	zbd_info->zone_info[nr_zones].start = offset;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) : 0;
	f->zbd_info->nr_zones = nr_zones;
	zbd_info = NULL;
	ret = 0;

out:
	sfree(zbd_info);
	free(zones);
	return ret;
}

static int zbd_set_max_open_zones(struct thread_data *td, struct fio_file *f)
{
	struct zoned_block_device_info *zbd = f->zbd_info;
	unsigned int max_open_zones;
	int ret;

	if (zbd->model != ZBD_HOST_MANAGED || td->o.ignore_zone_limits) {
		/* Only host-managed devices have a max open limit */
		zbd->max_open_zones = td->o.max_open_zones;
		goto out;
	}

	/* If host-managed, get the max open limit */
	ret = zbd_get_max_open_zones(td, f, &max_open_zones);
	if (ret)
		return ret;

	if (!max_open_zones) {
		/* No device limit */
		zbd->max_open_zones = td->o.max_open_zones;
	} else if (!td->o.max_open_zones) {
		/* No user limit. Set limit to device limit */
		zbd->max_open_zones = max_open_zones;
	} else if (td->o.max_open_zones <= max_open_zones) {
		/* Both user limit and dev limit. User limit not too large */
		zbd->max_open_zones = td->o.max_open_zones;
	} else {
		/* Both user limit and dev limit. User limit too large */
		td_verror(td, EINVAL,
			  "Specified --max_open_zones is too large");
		log_err("Specified --max_open_zones (%d) is larger than max (%u)\n",
			td->o.max_open_zones, max_open_zones);
		return -EINVAL;
	}

out:
	/* Ensure that the limit is not larger than FIO's internal limit */
	if (zbd->max_open_zones > ZBD_MAX_OPEN_ZONES) {
		td_verror(td, EINVAL, "'max_open_zones' value is too large");
		log_err("'max_open_zones' value is larger than %u\n", ZBD_MAX_OPEN_ZONES);
		return -EINVAL;
	}

	dprint(FD_ZBD, "%s: using max open zones limit: %"PRIu32"\n",
	       f->file_name, zbd->max_open_zones);

	return 0;
}

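/*
 * Effective limit chosen above, derived from the code, with "user" being
 * td->o.max_open_zones and "dev" the limit reported by a host-managed
 * device:
 *
 *	user == 0, dev == 0: no limit, open zones are not tracked
 *	user == 0, dev  > 0: dev
 *	user  > 0, dev == 0: user
 *	user  > 0, dev  > 0: user if user <= dev, otherwise -EINVAL
 */
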
/*
 * Allocate zone information and store it into f->zbd_info if zonemode=zbd.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
{
	enum zbd_zoned_model zbd_model;
	int ret;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);

	ret = zbd_get_zoned_model(td, f, &zbd_model);
	if (ret)
		return ret;

	switch (zbd_model) {
	case ZBD_HOST_AWARE:
	case ZBD_HOST_MANAGED:
		ret = parse_zone_info(td, f);
		if (ret)
			return ret;
		break;
	case ZBD_NONE:
		ret = init_zone_info(td, f);
		if (ret)
			return ret;
		break;
	default:
		td_verror(td, EINVAL, "Unsupported zoned model");
		log_err("Unsupported zoned model\n");
		return -EINVAL;
	}

	assert(f->zbd_info);
	f->zbd_info->model = zbd_model;

	ret = zbd_set_max_open_zones(td, f);
	if (ret) {
		zbd_free_zone_info(f);
		return ret;
	}

	return 0;
}

void zbd_free_zone_info(struct fio_file *f)
{
	uint32_t refcount;

	assert(f->zbd_info);

	pthread_mutex_lock(&f->zbd_info->mutex);
	refcount = --f->zbd_info->refcount;
	pthread_mutex_unlock(&f->zbd_info->mutex);

	assert((int32_t)refcount >= 0);
	if (refcount == 0)
		sfree(f->zbd_info);
	f->zbd_info = NULL;
}

/*
 * Initialize f->zbd_info.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * Note: this function can only work correctly if it is called before the first
 * fio fork() call.
 */
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
	struct thread_data *td2;
	struct fio_file *f2;
	int i, j, ret;

	for_each_td(td2, i) {
		for_each_file(td2, f2, j) {
			if (td2 == td && f2 == file)
				continue;
			if (!f2->zbd_info ||
			    strcmp(f2->file_name, file->file_name) != 0)
				continue;
			file->zbd_info = f2->zbd_info;
			file->zbd_info->refcount++;
			return 0;
		}
	}

	ret = zbd_create_zone_info(td, file);
	if (ret < 0)
		td_verror(td, -ret, "zbd_create_zone_info() failed");
	return ret;
}

int zbd_init_files(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	for_each_file(td, f, i) {
		if (zbd_init_zone_info(td, f))
			return 1;
	}
	return 0;
}

void zbd_recalc_options_with_zone_granularity(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	for_each_file(td, f, i) {
		struct zoned_block_device_info *zbd = f->zbd_info;
		// zonemode=strided doesn't get per-file zone size.
		uint64_t zone_size = zbd ? zbd->zone_size : td->o.zone_size;

		if (zone_size == 0)
			continue;

		if (td->o.size_nz > 0) {
			td->o.size = td->o.size_nz * zone_size;
		}
		if (td->o.io_size_nz > 0) {
			td->o.io_size = td->o.io_size_nz * zone_size;
		}
		if (td->o.start_offset_nz > 0) {
			td->o.start_offset = td->o.start_offset_nz * zone_size;
		}
		if (td->o.offset_increment_nz > 0) {
			td->o.offset_increment = td->o.offset_increment_nz * zone_size;
		}
		if (td->o.zone_skip_nz > 0) {
			td->o.zone_skip = td->o.zone_skip_nz * zone_size;
		}
	}
}

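/*
 * The *_nz option variants above hold values that were given in units of
 * zones ('z' suffix). For example, with a 256 MiB zone size, size=16z
 * expands to td->o.size = 16 * 256 MiB = 4 GiB.
 */
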
int zbd_setup_files(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	if (!zbd_using_direct_io()) {
		log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
		return 1;
	}

	if (!zbd_verify_sizes())
		return 1;

	if (!zbd_verify_bs())
		return 1;

	for_each_file(td, f, i) {
		struct zoned_block_device_info *zbd = f->zbd_info;
		struct fio_zone_info *z;
		int zi;

		assert(zbd);

		f->min_zone = zbd_zone_idx(f, f->file_offset);
		f->max_zone = zbd_zone_idx(f, f->file_offset + f->io_size);

		/*
		 * When all zones in the I/O range are conventional, io_size
		 * can be smaller than zone size, making min_zone the same
		 * as max_zone. This is why the assert below needs to be made
		 * conditional.
		 */
		if (zbd_is_seq_job(f))
			assert(f->min_zone < f->max_zone);

		if (td->o.max_open_zones > 0 &&
		    zbd->max_open_zones != td->o.max_open_zones) {
			log_err("Different 'max_open_zones' values\n");
			return 1;
		}

		/*
		 * The per job max open zones limit cannot be used without a
		 * global max open zones limit. (As the tracking of open zones
		 * is disabled when there is no global max open zones limit.)
		 */
		if (td->o.job_max_open_zones && !zbd->max_open_zones) {
			log_err("'job_max_open_zones' cannot be used without a global open zones limit\n");
			return 1;
		}

		/*
		 * zbd->max_open_zones is the global limit shared for all jobs
		 * that target the same zoned block device. Force sync the per
		 * thread global limit with the actual global limit. (The real
		 * per thread/job limit is stored in td->o.job_max_open_zones).
		 */
		td->o.max_open_zones = zbd->max_open_zones;

		for (zi = f->min_zone; zi < f->max_zone; zi++) {
			z = &zbd->zone_info[zi];
			if (z->cond != ZBD_ZONE_COND_IMP_OPEN &&
			    z->cond != ZBD_ZONE_COND_EXP_OPEN)
				continue;
			if (zbd_open_zone(td, f, zi))
				continue;
			/*
			 * If the number of open zones exceeds specified limits,
			 * reset all extra open zones.
			 */
			if (zbd_reset_zone(td, f, z) < 0) {
				log_err("Failed to reset zone %d\n", zi);
				return 1;
			}
		}
	}

	return 0;
}

/*
 * Reset zbd_info.write_cnt, the counter that counts down towards the next
 * zone reset.
 */
static void _zbd_reset_write_cnt(const struct thread_data *td,
				 const struct fio_file *f)
{
	assert(0 <= td->o.zrf.u.f && td->o.zrf.u.f <= 1);

	f->zbd_info->write_cnt = td->o.zrf.u.f ?
		min(1.0 / td->o.zrf.u.f, 0.0 + UINT_MAX) : UINT_MAX;
}

static void zbd_reset_write_cnt(const struct thread_data *td,
				const struct fio_file *f)
{
	pthread_mutex_lock(&f->zbd_info->mutex);
	_zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);
}

static bool zbd_dec_and_reset_write_cnt(const struct thread_data *td,
					const struct fio_file *f)
{
	uint32_t write_cnt = 0;

	pthread_mutex_lock(&f->zbd_info->mutex);
	assert(f->zbd_info->write_cnt);
	if (f->zbd_info->write_cnt)
		write_cnt = --f->zbd_info->write_cnt;
	if (write_cnt == 0)
		_zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);

	return write_cnt == 0;
}

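/*
 * Worked example: with a zone reset fraction td->o.zrf.u.f of 0.01,
 * write_cnt is (re)initialized to 1 / 0.01 = 100, so
 * zbd_dec_and_reset_write_cnt() returns true once per 100 counted writes.
 * A fraction of 0 effectively disables the mechanism (UINT_MAX writes).
 */
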
enum swd_action {
	CHECK_SWD,
	SET_SWD,
};

/* Calculate the number of sectors with data (swd) and perform action 'a' */
static uint64_t zbd_process_swd(struct thread_data *td,
				const struct fio_file *f, enum swd_action a)
{
	struct fio_zone_info *zb, *ze, *z;
	uint64_t swd = 0;
	uint64_t wp_swd = 0;

	zb = get_zone(f, f->min_zone);
	ze = get_zone(f, f->max_zone);
	for (z = zb; z < ze; z++) {
		if (z->has_wp) {
			zone_lock(td, f, z);
			wp_swd += z->wp - z->start;
		}
		swd += z->wp - z->start;
	}
	pthread_mutex_lock(&f->zbd_info->mutex);
	switch (a) {
	case CHECK_SWD:
		assert(f->zbd_info->sectors_with_data == swd);
		assert(f->zbd_info->wp_sectors_with_data == wp_swd);
		break;
	case SET_SWD:
		f->zbd_info->sectors_with_data = swd;
		f->zbd_info->wp_sectors_with_data = wp_swd;
		break;
	}
	pthread_mutex_unlock(&f->zbd_info->mutex);
	for (z = zb; z < ze; z++)
		if (z->has_wp)
			zone_unlock(z);

	return swd;
}

/*
 * The swd check is useful for debugging but takes too much time to leave
 * it enabled all the time. Hence it is disabled by default.
 */
static const bool enable_check_swd = false;

/* Check whether the values of zbd_info.*sectors_with_data are correct. */
static void zbd_check_swd(struct thread_data *td, const struct fio_file *f)
{
	if (!enable_check_swd)
		return;

	zbd_process_swd(td, f, CHECK_SWD);
}

void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
	struct fio_zone_info *zb, *ze;
	uint64_t swd;

	if (!f->zbd_info || !td_write(td))
		return;

	zb = get_zone(f, f->min_zone);
	ze = get_zone(f, f->max_zone);
	swd = zbd_process_swd(td, f, SET_SWD);
	dprint(FD_ZBD, "%s(%s): swd = %" PRIu64 "\n", __func__, f->file_name,
	       swd);
	/*
	 * If data verification is enabled reset the affected zones before
	 * writing any data to avoid that a zone reset has to be issued while
	 * writing data, which causes data loss.
	 */
	if (td->o.verify != VERIFY_NONE && td->runstate != TD_VERIFYING)
		zbd_reset_zones(td, f, zb, ze);
	zbd_reset_write_cnt(td, f);
}

/* Return random zone index for one of the open zones. */
static uint32_t pick_random_zone_idx(const struct fio_file *f,
				     const struct io_u *io_u)
{
	return (io_u->offset - f->file_offset) * f->zbd_info->num_open_zones /
		f->io_size;
}

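/*
 * The index above is the io_u offset scaled linearly from the I/O range
 * [file_offset, file_offset + io_size) into [0, num_open_zones): e.g. an
 * offset halfway through the range with 8 open zones yields index 4. Since
 * the offset is already randomized, no extra random number generation is
 * needed.
 */
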
static bool any_io_in_flight(void)
{
	struct thread_data *td;
	int i;

	for_each_td(td, i) {
		if (td->io_u_in_flight)
			return true;
	}

	return false;
}

/*
 * Modify the offset of an I/O unit that does not refer to an open zone such
 * that it refers to an open zone. Close an open zone and open a new zone if
 * necessary. The open zone is searched across sequential zones.
 * This algorithm can only work correctly if all write pointers are
 * a multiple of the fio block size. The caller must neither hold z->mutex
 * nor f->zbd_info->mutex. Returns with z->mutex held upon success.
 */
static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
						      struct io_u *io_u)
{
	const uint64_t min_bs = td->o.min_bs[io_u->ddir];
	struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbdi = f->zbd_info;
	struct fio_zone_info *z;
	unsigned int open_zone_idx = -1;
	uint32_t zone_idx, new_zone_idx;
	int i;
	bool wait_zone_close;
	bool in_flight;
	bool should_retry = true;

	assert(is_valid_offset(f, io_u->offset));

	if (zbdi->max_open_zones || td->o.job_max_open_zones) {
		/*
		 * This statement accesses zbdi->open_zones[] on purpose
		 * without locking.
		 */
		zone_idx = zbdi->open_zones[pick_random_zone_idx(f, io_u)];
	} else {
		zone_idx = zbd_zone_idx(f, io_u->offset);
	}
	if (zone_idx < f->min_zone)
		zone_idx = f->min_zone;
	else if (zone_idx >= f->max_zone)
		zone_idx = f->max_zone - 1;
	dprint(FD_ZBD, "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
	       __func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);

	/*
	 * Since z->mutex is the outer lock and zbdi->mutex the inner
	 * lock it can happen that the state of the zone with index zone_idx
	 * has changed after 'z' has been assigned and before zbdi->mutex
	 * has been obtained. Hence the loop.
	 */
	for (;;) {
		uint32_t tmp_idx;

		z = get_zone(f, zone_idx);
		if (z->has_wp)
			zone_lock(td, f, z);
		pthread_mutex_lock(&zbdi->mutex);
		if (z->has_wp) {
			if (z->cond != ZBD_ZONE_COND_OFFLINE &&
			    zbdi->max_open_zones == 0 && td->o.job_max_open_zones == 0)
				goto examine_zone;
			if (zbdi->num_open_zones == 0) {
				dprint(FD_ZBD, "%s(%s): no zones are open\n",
				       __func__, f->file_name);
				goto open_other_zone;
			}
		}

		/*
		 * The list of opened zones is per-device and shared across all
		 * threads. Start with a quasi-random candidate zone. Ignore
		 * zones which don't belong to the thread's offset/size area.
		 */
		open_zone_idx = pick_random_zone_idx(f, io_u);
		assert(!open_zone_idx ||
		       open_zone_idx < zbdi->num_open_zones);
		tmp_idx = open_zone_idx;
		for (i = 0; i < zbdi->num_open_zones; i++) {
			uint32_t tmpz;

			if (tmp_idx >= zbdi->num_open_zones)
				tmp_idx = 0;
			tmpz = zbdi->open_zones[tmp_idx];
			if (f->min_zone <= tmpz && tmpz < f->max_zone) {
				open_zone_idx = tmp_idx;
				goto found_candidate_zone;
			}

			tmp_idx++;
		}

		dprint(FD_ZBD, "%s(%s): no candidate zone\n",
		       __func__, f->file_name);
		pthread_mutex_unlock(&zbdi->mutex);
		if (z->has_wp)
			zone_unlock(z);
		return NULL;

found_candidate_zone:
		new_zone_idx = zbdi->open_zones[open_zone_idx];
		if (new_zone_idx == zone_idx)
			break;
		zone_idx = new_zone_idx;
		pthread_mutex_unlock(&zbdi->mutex);
		if (z->has_wp)
			zone_unlock(z);
	}

	/* Both z->mutex and zbdi->mutex are held. */

examine_zone:
	if (z->wp + min_bs <= zbd_zone_capacity_end(z)) {
		pthread_mutex_unlock(&zbdi->mutex);
		goto out;
	}

open_other_zone:
	/* Check if the number of open zones has reached one of the limits. */
	wait_zone_close =
		zbdi->num_open_zones == f->max_zone - f->min_zone ||
		(zbdi->max_open_zones &&
		 zbdi->num_open_zones == zbdi->max_open_zones) ||
		(td->o.job_max_open_zones &&
		 td->num_open_zones == td->o.job_max_open_zones);

	pthread_mutex_unlock(&zbdi->mutex);

	/* Only z->mutex is held. */

	/*
	 * When the number of open zones has reached one of the limits, wait
	 * for a zone close before opening a new zone.
	 */
	if (wait_zone_close) {
		dprint(FD_ZBD, "%s(%s): quiesce to allow open zones to close\n",
		       __func__, f->file_name);
		io_u_quiesce(td);
	}

retry:
	/* Zone 'z' is full, so try to open a new zone. */
	for (i = f->io_size / zbdi->zone_size; i > 0; i--) {
		zone_idx++;
		if (z->has_wp)
			zone_unlock(z);
		z++;
		if (!is_valid_offset(f, z->start)) {
			/* Wrap-around. */
			zone_idx = f->min_zone;
			z = get_zone(f, zone_idx);
		}
		assert(is_valid_offset(f, z->start));
		if (!z->has_wp)
			continue;
		zone_lock(td, f, z);
		if (z->open)
			continue;
		if (zbd_open_zone(td, f, zone_idx))
			goto out;
	}

	/* Only z->mutex is held. */

	/* Check whether the write fits in any of the already opened zones. */
	pthread_mutex_lock(&zbdi->mutex);
	for (i = 0; i < zbdi->num_open_zones; i++) {
		zone_idx = zbdi->open_zones[i];
		if (zone_idx < f->min_zone || zone_idx >= f->max_zone)
			continue;
		pthread_mutex_unlock(&zbdi->mutex);
		zone_unlock(z);

		z = get_zone(f, zone_idx);

		zone_lock(td, f, z);
		if (z->wp + min_bs <= zbd_zone_capacity_end(z))
			goto out;
		pthread_mutex_lock(&zbdi->mutex);
	}

	/*
	 * When any I/O is in-flight or when all in-flight I/Os have just
	 * completed, those I/Os might have closed zones. In that case, retry
	 * the steps to open a zone. Before retrying, call io_u_quiesce() to
	 * complete in-flight writes.
	 */
	in_flight = any_io_in_flight();
	if (in_flight || should_retry) {
		dprint(FD_ZBD, "%s(%s): wait zone close and retry open zones\n",
		       __func__, f->file_name);
		pthread_mutex_unlock(&zbdi->mutex);
		zone_unlock(z);
		io_u_quiesce(td);
		zone_lock(td, f, z);
		should_retry = in_flight;
		goto retry;
	}

	pthread_mutex_unlock(&zbdi->mutex);
	zone_unlock(z);
	dprint(FD_ZBD, "%s(%s): did not open another zone\n", __func__,
	       f->file_name);
	return NULL;

out:
	dprint(FD_ZBD, "%s(%s): returning zone %d\n", __func__, f->file_name,
	       zone_idx);
	io_u->offset = z->start;
	assert(z->has_wp);
	assert(z->cond != ZBD_ZONE_COND_OFFLINE);
	return z;
}

/* The caller must hold z->mutex. */
static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
						    struct io_u *io_u,
						    struct fio_zone_info *z)
{
	const struct fio_file *f = io_u->file;
	const uint64_t min_bs = td->o.min_bs[DDIR_WRITE];

	if (!zbd_open_zone(td, f, zbd_zone_nr(f, z))) {
		zone_unlock(z);
		z = zbd_convert_to_open_zone(td, io_u);
		assert(z);
	}

	if (z->verify_block * min_bs >= z->capacity) {
		log_err("%s: %d * %"PRIu64" >= %"PRIu64"\n", f->file_name, z->verify_block,
			min_bs, z->capacity);
		/*
		 * If the assertion below fails during a test run, adding
		 * "--experimental_verify=1" to the command line may help.
		 */
		assert(false);
	}
	io_u->offset = z->start + z->verify_block * min_bs;
	if (io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
		log_err("%s: %llu + %llu >= %"PRIu64"\n", f->file_name, io_u->offset,
			io_u->buflen, zbd_zone_capacity_end(z));
		assert(false);
	}
	z->verify_block += io_u->buflen / min_bs;

	return z;
}

/*
 * Find another zone which has @min_bytes of readable data. Search in zones
 * @zb + 1 .. @zl. For random workload, also search in zones @zb - 1 .. @zf.
 *
 * Either returns NULL or returns a zone pointer. When the returned zone has a
 * write pointer, its mutex is held.
 */
static struct fio_zone_info *
zbd_find_zone(struct thread_data *td, struct io_u *io_u, uint64_t min_bytes,
	      struct fio_zone_info *zb, struct fio_zone_info *zl)
{
	struct fio_file *f = io_u->file;
	struct fio_zone_info *z1, *z2;
	const struct fio_zone_info *const zf = get_zone(f, f->min_zone);

	/*
	 * Skip to the next non-empty zone in case of sequential I/O and to
	 * the nearest non-empty zone in case of random I/O.
	 */
	for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
		if (z1 < zl && z1->cond != ZBD_ZONE_COND_OFFLINE) {
			if (z1->has_wp)
				zone_lock(td, f, z1);
			if (z1->start + min_bytes <= z1->wp)
				return z1;
			if (z1->has_wp)
				zone_unlock(z1);
		} else if (!td_random(td)) {
			break;
		}
		if (td_random(td) && z2 >= zf &&
		    z2->cond != ZBD_ZONE_COND_OFFLINE) {
			if (z2->has_wp)
				zone_lock(td, f, z2);
			if (z2->start + min_bytes <= z2->wp)
				return z2;
			if (z2->has_wp)
				zone_unlock(z2);
		}
	}
	dprint(FD_ZBD, "%s: no zone has %"PRIu64" bytes of readable data\n",
	       f->file_name, min_bytes);
	return NULL;
}

/**
 * zbd_end_zone_io - update zone status at command completion
 * @io_u: I/O unit
 * @z: zone info pointer
 *
 * If the write command made the zone full, close it.
 *
 * The caller must hold z->mutex.
 */
static void zbd_end_zone_io(struct thread_data *td, const struct io_u *io_u,
			    struct fio_zone_info *z)
{
	const struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_WRITE &&
	    io_u->offset + io_u->buflen >= zbd_zone_capacity_end(z)) {
		pthread_mutex_lock(&f->zbd_info->mutex);
		zbd_close_zone(td, f, zbd_zone_nr(f, z));
		pthread_mutex_unlock(&f->zbd_info->mutex);
	}
}

/**
 * zbd_queue_io - update the write pointer of a sequential zone
 * @io_u: I/O unit
 * @success: Whether or not the I/O unit has been queued successfully
 * @q: queueing status (busy, completed or queued).
 *
 * For write and trim operations, update the write pointer of the I/O unit
 * target zone.
 */
static void zbd_queue_io(struct thread_data *td, struct io_u *io_u, int q,
			 bool success)
{
	const struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbd_info = f->zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx;
	uint64_t zone_end;

	assert(zbd_info);

	zone_idx = zbd_zone_idx(f, io_u->offset);
	assert(zone_idx < zbd_info->nr_zones);
	z = get_zone(f, zone_idx);

	assert(z->has_wp);

	if (!success)
		goto unlock;

	dprint(FD_ZBD,
	       "%s: queued I/O (%lld, %llu) for zone %u\n",
	       f->file_name, io_u->offset, io_u->buflen, zone_idx);

	switch (io_u->ddir) {
	case DDIR_WRITE:
		zone_end = min((uint64_t)(io_u->offset + io_u->buflen),
			       zbd_zone_capacity_end(z));
		pthread_mutex_lock(&zbd_info->mutex);
		/*
		 * z->wp > zone_end means that one or more I/O errors
		 * have occurred.
		 */
		if (z->wp <= zone_end) {
			zbd_info->sectors_with_data += zone_end - z->wp;
			zbd_info->wp_sectors_with_data += zone_end - z->wp;
		}
		pthread_mutex_unlock(&zbd_info->mutex);
		z->wp = zone_end;
		break;
	default:
		break;
	}

	if (q == FIO_Q_COMPLETED && !io_u->error)
		zbd_end_zone_io(td, io_u, z);

unlock:
	if (!success || q != FIO_Q_QUEUED) {
		/* BUSY or COMPLETED: unlock the zone */
		zone_unlock(z);
		io_u->zbd_put_io = NULL;
	}
}

/**
 * zbd_put_io - Unlock an I/O unit target zone lock
 * @io_u: I/O unit
 */
static void zbd_put_io(struct thread_data *td, const struct io_u *io_u)
{
	const struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbd_info = f->zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx;

	assert(zbd_info);

	zone_idx = zbd_zone_idx(f, io_u->offset);
	assert(zone_idx < zbd_info->nr_zones);
	z = get_zone(f, zone_idx);

	assert(z->has_wp);

	dprint(FD_ZBD,
	       "%s: terminate I/O (%lld, %llu) for zone %u\n",
	       f->file_name, io_u->offset, io_u->buflen, zone_idx);

	zbd_end_zone_io(td, io_u, z);

	zone_unlock(z);
	zbd_check_swd(td, f);
}

/*
 * Windows and MacOS do not define this.
 */
#ifndef EREMOTEIO
#define EREMOTEIO	121 /* POSIX value */
#endif

bool zbd_unaligned_write(int error_code)
{
	switch (error_code) {
	case EIO:
	case EREMOTEIO:
		return true;
	}
	return false;
}

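/*
 * Background (assumed, based on typical Linux block layer behavior): writes
 * that violate a sequential zone's write pointer commonly surface as EIO or
 * EREMOTEIO, which is why exactly these two codes are classified as
 * unaligned-write errors here.
 */
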
/**
 * setup_zbd_zone_mode - handle zoneskip as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * For sequential workloads, change the file offset to skip zoneskip bytes when
 * no more IO can be performed in the current zone.
 * - For read workloads, zoneskip is applied when the io has reached the end of
 *   the zone or the zone write position (when td->o.read_beyond_wp is false).
 * - For write workloads, zoneskip is applied when the zone is full.
 * This applies only to read and write operations.
 */
void setup_zbd_zone_mode(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	struct fio_zone_info *z;
	uint32_t zone_idx;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);
	assert(td->o.zone_size);
	assert(f->zbd_info);

	zone_idx = zbd_zone_idx(f, f->last_pos[ddir]);
	z = get_zone(f, zone_idx);

	/*
	 * When the zone capacity is smaller than the zone size and the I/O is
	 * sequential write, skip to zone end if the latest position is at the
	 * zone capacity limit.
	 */
	if (z->capacity < f->zbd_info->zone_size && !td_random(td) &&
	    ddir == DDIR_WRITE &&
	    f->last_pos[ddir] >= zbd_zone_capacity_end(z)) {
		dprint(FD_ZBD,
		       "%s: Jump from zone capacity limit to zone end:"
		       " (%"PRIu64" -> %"PRIu64") for zone %u (%"PRIu64")\n",
		       f->file_name, f->last_pos[ddir],
		       zbd_zone_end(z), zone_idx, z->capacity);
		td->io_skip_bytes += zbd_zone_end(z) - f->last_pos[ddir];
		f->last_pos[ddir] = zbd_zone_end(z);
	}

	/*
	 * zone_skip is valid only for sequential workloads.
	 */
	if (td_random(td) || !td->o.zone_skip)
		return;

	/*
	 * It is time to switch to a new zone if:
	 * - zone_bytes == zone_size bytes have already been accessed
	 * - The last position reached the end of the current zone.
	 * - For reads with td->o.read_beyond_wp == false, the last position
	 *   reached the zone write pointer.
	 */
	if (td->zone_bytes >= td->o.zone_size ||
	    f->last_pos[ddir] >= zbd_zone_end(z) ||
	    (ddir == DDIR_READ &&
	     (!td->o.read_beyond_wp) && f->last_pos[ddir] >= z->wp)) {
		/*
		 * Skip zones.
		 */
		td->zone_bytes = 0;
		f->file_offset += td->o.zone_size + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = get_start_offset(td, f);

		f->last_pos[ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}
}

/**
 * zbd_adjust_ddir - Adjust an I/O direction for zonemode=zbd.
 *
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 * @ddir: I/O direction before adjustment.
 *
 * Return adjusted I/O direction.
 */
enum fio_ddir zbd_adjust_ddir(struct thread_data *td, struct io_u *io_u,
			      enum fio_ddir ddir)
{
	/*
	 * In case read direction is chosen for the first random I/O, fio with
	 * zonemode=zbd stops because no data can be read from zoned block
	 * devices with all empty zones. Overwrite the first I/O direction as
	 * write to make sure data to read exists.
	 */
	assert(io_u->file->zbd_info);
	if (ddir != DDIR_READ || !td_rw(td))
		return ddir;

	if (io_u->file->zbd_info->sectors_with_data ||
	    td->o.read_beyond_wp)
		return DDIR_READ;

	return DDIR_WRITE;
}

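/*
 * Example: for a rw=randrw job against a device whose zones are all empty,
 * sectors_with_data is zero, so the first I/O direction is forced to be a
 * write; once written data exists, read directions pass through unchanged.
 */
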
bfbdd35b
BVA
1733/**
1734 * zbd_adjust_block - adjust the offset and length as necessary for ZBD drives
1735 * @td: FIO thread data.
1736 * @io_u: FIO I/O unit.
1737 *
1738 * Locking strategy: returns with z->mutex locked if and only if z refers
1739 * to a sequential zone and if io_u_accept is returned. z is the zone that
1740 * corresponds to io_u->offset at the end of this function.
1741 */
enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbdi = f->zbd_info;
	uint32_t zone_idx_b;
	struct fio_zone_info *zb, *zl, *orig_zb;
	uint32_t orig_len = io_u->buflen;
	uint64_t min_bs = td->o.min_bs[io_u->ddir];
	uint64_t new_len;
	int64_t range;

	assert(zbdi);
	assert(min_bs);
	assert(is_valid_offset(f, io_u->offset));
	assert(io_u->buflen);
	zone_idx_b = zbd_zone_idx(f, io_u->offset);
	zb = get_zone(f, zone_idx_b);
	orig_zb = zb;
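
	/*
	 * Zones without a write pointer (e.g. conventional zones) are
	 * handled first: non-writes pass through untouched, and writes are
	 * only clipped so they do not spill into a following sequential
	 * zone. Worked example (informative): for a conventional zone
	 * ending at 1M, a write at offset 1M - 576 with min_bs = 4K cannot
	 * fit, so the offset is reflected to 576 bytes past the zone start
	 * and the length is trimmed to a multiple of min_bs that ends at
	 * or before the zone boundary.
	 */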
	if (!zb->has_wp) {
		/* Accept non-write I/Os for conventional zones. */
		if (io_u->ddir != DDIR_WRITE)
			return io_u_accept;
		/*
		 * Make sure that writes to conventional zones
		 * don't cross over to any sequential zones.
		 */
		if (!(zb + 1)->has_wp ||
		    io_u->offset + io_u->buflen <= (zb + 1)->start)
			return io_u_accept;

		if (io_u->offset + min_bs > (zb + 1)->start) {
			dprint(FD_IO,
			       "%s: off=%llu + min_bs=%"PRIu64" > next zone %"PRIu64"\n",
			       f->file_name, io_u->offset,
			       min_bs, (zb + 1)->start);
			io_u->offset = zb->start + (zb + 1)->start - io_u->offset;
			new_len = min(io_u->buflen, (zb + 1)->start - io_u->offset);
		} else {
			new_len = (zb + 1)->start - io_u->offset;
		}
		io_u->buflen = new_len / min_bs * min_bs;
		return io_u_accept;
	}

	/*
	 * Accept the I/O offset for reads if reading beyond the write pointer
	 * is enabled.
	 */
	if (zb->cond != ZBD_ZONE_COND_OFFLINE &&
	    io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
		return io_u_accept;

	zbd_check_swd(td, f);

	zone_lock(td, f, zb);

	switch (io_u->ddir) {
	case DDIR_READ:
		if (td->runstate == TD_VERIFYING && td_write(td)) {
			zb = zbd_replay_write_order(td, io_u, zb);
			goto accept;
		}
		/*
		 * Check that there is enough written data in the zone to do an
		 * I/O of at least min_bs B. If there isn't, find a new zone for
		 * the I/O.
		 */
		range = zb->cond != ZBD_ZONE_COND_OFFLINE ?
			zb->wp - zb->start : 0;
		if (range < min_bs ||
		    ((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
			zone_unlock(zb);
			zl = get_zone(f, f->max_zone);
			zb = zbd_find_zone(td, io_u, min_bs, zb, zl);
			if (!zb) {
				dprint(FD_ZBD,
				       "%s: zbd_find_zone(%lld, %llu) failed\n",
				       f->file_name, io_u->offset,
				       io_u->buflen);
				goto eof;
			}
			/*
			 * zbd_find_zone() returned a zone with a range of at
			 * least min_bs.
			 */
			range = zb->wp - zb->start;
			assert(range >= min_bs);

			if (!td_random(td))
				io_u->offset = zb->start;
		}
		/*
		 * Make sure the I/O is within the zone valid data range while
		 * maximizing the I/O size and preserving randomness.
		 */
		if (range <= io_u->buflen)
			io_u->offset = zb->start;
		else if (td_random(td))
			io_u->offset = zb->start +
				((io_u->offset - orig_zb->start) %
				 (range - io_u->buflen)) / min_bs * min_bs;
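		/*
		 * Example (informative): with range = 8M of written data,
		 * buflen = 1M and min_bs = 4K, the remapped offset lands in
		 * [start, start + 7M), aligned down to 4K, so offset +
		 * buflen never exceeds the write pointer.
		 */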
		/*
		 * When zbd_find_zone() returns a conventional zone,
		 * we can simply accept the new I/O offset here.
		 */
		if (!zb->has_wp)
			return io_u_accept;
		/*
		 * Make sure the I/O does not cross over the zone wp position.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      (unsigned long long)(zb->wp - io_u->offset));
		new_len = new_len / min_bs * min_bs;
		if (new_len < io_u->buflen) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u to %llu\n",
			       orig_len, io_u->buflen);
		}
		assert(zb->start <= io_u->offset);
		assert(io_u->offset + io_u->buflen <= zb->wp);
		goto accept;
	case DDIR_WRITE:
		if (io_u->buflen > zbdi->zone_size) {
			td_verror(td, EINVAL, "I/O buflen exceeds zone size");
			dprint(FD_IO,
			       "%s: I/O buflen %llu exceeds zone size %"PRIu64"\n",
			       f->file_name, io_u->buflen, zbdi->zone_size);
			goto eof;
		}
		if (!zbd_open_zone(td, f, zone_idx_b)) {
			zone_unlock(zb);
			zb = zbd_convert_to_open_zone(td, io_u);
			if (!zb) {
				dprint(FD_IO, "%s: can't convert to open zone\n",
				       f->file_name);
				goto eof;
			}
		}
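		/*
		 * At this point zb is an open zone whose lock is held,
		 * either the original target zone or the one selected by
		 * zbd_convert_to_open_zone().
		 */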
		/* Check whether the zone reset threshold has been exceeded */
		if (td->o.zrf.u.f) {
			if (zbdi->wp_sectors_with_data >=
			    f->io_size * td->o.zrt.u.f &&
			    zbd_dec_and_reset_write_cnt(td, f)) {
				zb->reset_zone = 1;
			}
		}
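		/*
		 * Example (informative): with zone_reset_threshold=0.5 and
		 * zone_reset_frequency=1, once at least half of the I/O
		 * range holds written data, each write request first resets
		 * its target zone.
		 */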
		/* Reset the zone pointer if necessary */
		if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
			assert(td->o.verify == VERIFY_NONE);
			/*
			 * Since previous write requests may have been submitted
			 * asynchronously and since we will submit the zone
			 * reset synchronously, wait until previously submitted
			 * write requests have completed before issuing a
			 * zone reset.
			 */
			io_u_quiesce(td);
			zb->reset_zone = 0;
			if (zbd_reset_zone(td, f, zb) < 0)
				goto eof;

			if (zb->capacity < min_bs) {
				td_verror(td, EINVAL, "ZCAP is less than min_bs");
				log_err("zone capacity %"PRIu64" smaller than minimum block size %"PRIu64"\n",
					zb->capacity, min_bs);
				goto eof;
			}
		}
		/* Make writes occur at the write pointer */
		assert(!zbd_zone_full(f, zb, min_bs));
		io_u->offset = zb->wp;
		if (!is_valid_offset(f, io_u->offset)) {
			td_verror(td, EINVAL, "invalid WP value");
			dprint(FD_ZBD, "%s: dropped request with offset %llu\n",
			       f->file_name, io_u->offset);
			goto eof;
		}
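		/*
		 * Example for the clamping below (informative): with the
		 * write pointer at start + 760K, a zone capacity of 768K
		 * and min_bs = 4K, a 16K request is shrunk to the 8K that
		 * remain before the capacity limit.
		 */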
		/*
		 * Make sure that the buflen is a multiple of the minimal
		 * block size. Give up if shrinking would make the request too
		 * small.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      zbd_zone_capacity_end(zb) - io_u->offset);
		new_len = new_len / min_bs * min_bs;
		if (new_len == io_u->buflen)
			goto accept;
		if (new_len >= min_bs) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u to %llu\n",
			       orig_len, io_u->buflen);
			goto accept;
		}
		td_verror(td, EIO, "zone remainder too small");
		log_err("zone remainder %lld smaller than min block size %"PRIu64"\n",
			(zbd_zone_capacity_end(zb) - io_u->offset), min_bs);
		goto eof;
	case DDIR_TRIM:
		/* Check that a random trim targets a non-empty zone */
		if (!td_random(td) || zb->wp > zb->start)
			goto accept;

		/* Find a non-empty zone to trim */
		zone_unlock(zb);
		zl = get_zone(f, f->max_zone);
		zb = zbd_find_zone(td, io_u, 1, zb, zl);
		if (zb) {
			io_u->offset = zb->start;
			dprint(FD_ZBD, "%s: found new zone(%lld) for trim\n",
			       f->file_name, io_u->offset);
			goto accept;
		}
		goto eof;
	case DDIR_SYNC:
		/* fall-through */
	case DDIR_DATASYNC:
	case DDIR_SYNC_FILE_RANGE:
	case DDIR_WAIT:
	case DDIR_LAST:
	case DDIR_INVAL:
		goto accept;
	}

	assert(false);

accept:
	assert(zb->has_wp);
	assert(zb->cond != ZBD_ZONE_COND_OFFLINE);
	assert(!io_u->zbd_queue_io);
	assert(!io_u->zbd_put_io);
	io_u->zbd_queue_io = zbd_queue_io;
	io_u->zbd_put_io = zbd_put_io;
	/*
	 * Since we return with the zone lock still held,
	 * add an annotation to let Coverity know that it
	 * is intentional.
	 */
	/* coverity[missing_unlock] */
	return io_u_accept;

eof:
	if (zb && zb->has_wp)
		zone_unlock(zb);
	return io_u_eof;
}

/* Return a string with ZBD statistics */
char *zbd_write_status(const struct thread_stat *ts)
{
	char *res;

	if (asprintf(&res, "; %"PRIu64" zone resets", ts->nr_zone_resets) < 0)
		return NULL;
	return res;
}

/**
 * zbd_do_io_u_trim - If a zone reset is applicable, do a zone reset instead
 *		      of a trim
 *
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * It is assumed that z->mutex is already locked.
 * Return io_u_completed when the zone reset succeeds. Return 0 when the
 * target zone does not have a write pointer. On error, return a negative
 * errno.
 */
int zbd_do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_zone_info *z;
	uint32_t zone_idx;
	int ret;

	zone_idx = zbd_zone_idx(f, io_u->offset);
	z = get_zone(f, zone_idx);

	if (!z->has_wp)
		return 0;

	if (io_u->offset != z->start) {
		log_err("Trim offset not at zone start (%lld)\n", io_u->offset);
		return -EINVAL;
	}

	ret = zbd_reset_zone((struct thread_data *)td, f, z);
	if (ret < 0)
		return ret;

	return io_u_completed;
}
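
/*
 * Illustrative caller sketch (an assumption, not the exact fio call site):
 * an I/O path supporting zonemode=zbd could try a zone reset before falling
 * back to a regular trim, e.g.:
 *
 *	ret = zbd_do_io_u_trim(td, io_u);
 *	if (ret == io_u_completed)
 *		return FIO_Q_COMPLETED;
 *	else if (ret < 0)
 *		return ret;
 *	else
 *		... ret == 0: issue a regular trim ...
 */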