zbd: Avoid async I/O multi-job workload deadlock
[fio.git] / zbd.c
/*
 * Copyright (C) 2018 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/blkzoned.h>
#include "file.h"
#include "fio.h"
#include "lib/pow2.h"
#include "log.h"
#include "smalloc.h"
#include "verify.h"
#include "zbd.h"

/**
 * zbd_zone_idx - convert an offset into a zone number
 * @f: file pointer.
 * @offset: offset in bytes. If this offset is in the first zone_size bytes
 *          past the disk size then the index of the sentinel is returned.
 */
static uint32_t zbd_zone_idx(const struct fio_file *f, uint64_t offset)
{
        uint32_t zone_idx;

        if (f->zbd_info->zone_size_log2 > 0)
                zone_idx = offset >> f->zbd_info->zone_size_log2;
        else
                zone_idx = offset / f->zbd_info->zone_size;

        return min(zone_idx, f->zbd_info->nr_zones);
}
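
/*
 * Example for zbd_zone_idx() (illustrative numbers, not from the code above):
 * with a 256 MiB zone size, zone_size_log2 == 28, so an offset of 1 GiB
 * (1 << 30) maps to zone index (1 << 30) >> 28 == 4.
 */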

/**
 * zbd_zone_full - verify whether a zone is full
 * @f: file pointer.
 * @z: zone info pointer.
 * @required: minimum number of bytes that must remain in the zone.
 *
 * Returns true if and only if @z is a sequential zone in which fewer than
 * @required bytes remain between the write pointer and the end of the zone.
 *
 * The caller must hold z->mutex.
 */
static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
                          uint64_t required)
{
        assert((required & 511) == 0);

        return z->type == BLK_ZONE_TYPE_SEQWRITE_REQ &&
                z->wp + required > z->start + f->zbd_info->zone_size;
}

static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
{
        return (uint64_t)(offset - f->file_offset) < f->io_size;
}

/* Verify whether direct I/O is used for all host-managed zoned drives. */
static bool zbd_using_direct_io(void)
{
        struct thread_data *td;
        struct fio_file *f;
        int i, j;

        for_each_td(td, i) {
                if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
                        continue;
                for_each_file(td, f, j) {
                        if (f->zbd_info &&
                            f->zbd_info->model == ZBD_DM_HOST_MANAGED)
                                return false;
                }
        }

        return true;
}

/* Whether or not the I/O range for f includes one or more sequential zones */
static bool zbd_is_seq_job(struct fio_file *f)
{
        uint32_t zone_idx, zone_idx_b, zone_idx_e;

        assert(f->zbd_info);
        if (f->io_size == 0)
                return false;
        zone_idx_b = zbd_zone_idx(f, f->file_offset);
        zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size - 1);
        for (zone_idx = zone_idx_b; zone_idx <= zone_idx_e; zone_idx++)
                if (f->zbd_info->zone_info[zone_idx].type ==
                    BLK_ZONE_TYPE_SEQWRITE_REQ)
                        return true;

        return false;
}

/*
 * Verify whether offset and size parameters are aligned with zone boundaries.
 */
static bool zbd_verify_sizes(void)
{
        const struct fio_zone_info *z;
        struct thread_data *td;
        struct fio_file *f;
        uint64_t new_offset, new_end;
        uint32_t zone_idx;
        int i, j;

        for_each_td(td, i) {
                for_each_file(td, f, j) {
                        if (!f->zbd_info)
                                continue;
                        if (f->file_offset >= f->real_file_size)
                                continue;
                        if (!zbd_is_seq_job(f))
                                continue;
                        zone_idx = zbd_zone_idx(f, f->file_offset);
                        z = &f->zbd_info->zone_info[zone_idx];
                        if (f->file_offset != z->start) {
                                new_offset = (z+1)->start;
                                if (new_offset >= f->file_offset + f->io_size) {
                                        log_info("%s: io_size must be at least one zone\n",
                                                 f->file_name);
                                        return false;
                                }
                                log_info("%s: rounded up offset from %llu to %llu\n",
                                         f->file_name, (unsigned long long) f->file_offset,
                                         (unsigned long long) new_offset);
                                f->io_size -= (new_offset - f->file_offset);
                                f->file_offset = new_offset;
                        }
                        zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
                        z = &f->zbd_info->zone_info[zone_idx];
                        new_end = z->start;
                        if (f->file_offset + f->io_size != new_end) {
                                if (new_end <= f->file_offset) {
                                        log_info("%s: io_size must be at least one zone\n",
                                                 f->file_name);
                                        return false;
                                }
                                log_info("%s: rounded down io_size from %llu to %llu\n",
                                         f->file_name, (unsigned long long) f->io_size,
                                         (unsigned long long) new_end - f->file_offset);
                                f->io_size = new_end - f->file_offset;
                        }
                }
        }

        return true;
}

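/*
 * With data verification enabled, every configured block size must be a
 * divisor of the zone size; otherwise I/O units would have to be resized at
 * zone boundaries, which verification cannot handle.
 */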
static bool zbd_verify_bs(void)
{
        struct thread_data *td;
        struct fio_file *f;
        uint32_t zone_size;
        int i, j, k;

        for_each_td(td, i) {
                for_each_file(td, f, j) {
                        if (!f->zbd_info)
                                continue;
                        zone_size = f->zbd_info->zone_size;
                        for (k = 0; k < ARRAY_SIZE(td->o.bs); k++) {
                                if (td->o.verify != VERIFY_NONE &&
                                    zone_size % td->o.bs[k] != 0) {
                                        log_info("%s: block size %llu is not a divisor of the zone size %d\n",
                                                 f->file_name, td->o.bs[k],
                                                 zone_size);
                                        return false;
                                }
                        }
                }
        }
        return true;
}

/*
 * Read zone information into @buf starting from sector @start_sector.
 * @fd is a file descriptor that refers to a block device and @bufsz is the
 * size of @buf.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int read_zone_info(int fd, uint64_t start_sector,
                          void *buf, unsigned int bufsz)
{
        struct blk_zone_report *hdr = buf;

        if (bufsz < sizeof(*hdr))
                return -EINVAL;

        memset(hdr, 0, sizeof(*hdr));

        hdr->nr_zones = (bufsz - sizeof(*hdr)) / sizeof(struct blk_zone);
        hdr->sector = start_sector;
        return ioctl(fd, BLKREPORTZONE, hdr) >= 0 ? 0 : -errno;
}

/*
 * Read up to 255 characters from the first line of a file. Strip the trailing
 * newline.
 */
static char *read_file(const char *path)
{
        char line[256], *p = line;
        FILE *f;

        f = fopen(path, "rb");
        if (!f)
                return NULL;
        if (!fgets(line, sizeof(line), f))
                line[0] = '\0';
        strsep(&p, "\n");
        fclose(f);

        return strdup(line);
}

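/*
 * Determine the zoned model of the block device backing @file_name by reading
 * the sysfs attribute /sys/dev/block/<major>:<minor>/queue/zoned. For a
 * partition, the attribute of the holder device is read instead.
 */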
static enum blk_zoned_model get_zbd_model(const char *file_name)
{
        enum blk_zoned_model model = ZBD_DM_NONE;
        char *zoned_attr_path = NULL;
        char *model_str = NULL;
        struct stat statbuf;
        char *sys_devno_path = NULL;
        char *part_attr_path = NULL;
        char *part_str = NULL;
        char sys_path[PATH_MAX];
        ssize_t sz;
        char *delim = NULL;

        if (stat(file_name, &statbuf) < 0)
                goto out;

        if (asprintf(&sys_devno_path, "/sys/dev/block/%d:%d",
                     major(statbuf.st_rdev), minor(statbuf.st_rdev)) < 0)
                goto out;

        sz = readlink(sys_devno_path, sys_path, sizeof(sys_path) - 1);
        if (sz < 0)
                goto out;
        sys_path[sz] = '\0';

        /*
         * If the device is a partition device, cut the device name in the
         * canonical sysfs path to obtain the sysfs path of the holder device.
         * e.g.:  /sys/devices/.../sda/sda1 -> /sys/devices/.../sda
         */
        if (asprintf(&part_attr_path, "/sys/dev/block/%s/partition",
                     sys_path) < 0)
                goto out;
        part_str = read_file(part_attr_path);
        if (part_str && *part_str == '1') {
                delim = strrchr(sys_path, '/');
                if (!delim)
                        goto out;
                *delim = '\0';
        }

        if (asprintf(&zoned_attr_path,
                     "/sys/dev/block/%s/queue/zoned", sys_path) < 0)
                goto out;

        model_str = read_file(zoned_attr_path);
        if (!model_str)
                goto out;
        dprint(FD_ZBD, "%s: zbd model string: %s\n", file_name, model_str);
        if (strcmp(model_str, "host-aware") == 0)
                model = ZBD_DM_HOST_AWARE;
        else if (strcmp(model_str, "host-managed") == 0)
                model = ZBD_DM_HOST_MANAGED;

out:
        free(model_str);
        free(zoned_attr_path);
        free(part_str);
        free(part_attr_path);
        free(sys_devno_path);
        return model;
}

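/*
 * Integer base-2 logarithm: index of the most significant set bit, or -1 if
 * i == 0.
 */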
static int ilog2(uint64_t i)
{
        int log = -1;

        while (i) {
                i >>= 1;
                log++;
        }
        return log;
}

/*
 * Initialize f->zbd_info for devices that are not zoned block devices. This
 * allows executing a ZBD workload against a non-ZBD device.
 */
static int init_zone_info(struct thread_data *td, struct fio_file *f)
{
        uint32_t nr_zones;
        struct fio_zone_info *p;
        uint64_t zone_size;
        struct zoned_block_device_info *zbd_info = NULL;
        pthread_mutexattr_t attr;
        int i;

        zone_size = td->o.zone_size;
        assert(zone_size);
        nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
        zbd_info = scalloc(1, sizeof(*zbd_info) +
                           (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
        if (!zbd_info)
                return -ENOMEM;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutexattr_setpshared(&attr, true);
        pthread_mutex_init(&zbd_info->mutex, &attr);
        zbd_info->refcount = 1;
        p = &zbd_info->zone_info[0];
        for (i = 0; i < nr_zones; i++, p++) {
                pthread_mutex_init(&p->mutex, &attr);
                p->start = i * zone_size;
                p->wp = p->start + zone_size;
                p->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
                p->cond = BLK_ZONE_COND_EMPTY;
        }
        /* a sentinel */
        p->start = nr_zones * zone_size;

        f->zbd_info = zbd_info;
        f->zbd_info->zone_size = zone_size;
        f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
                ilog2(zone_size) : -1;
        f->zbd_info->nr_zones = nr_zones;
        pthread_mutexattr_destroy(&attr);
        return 0;
}

/*
 * Parse the BLKREPORTZONE output and store it in f->zbd_info. Must be called
 * only for devices that support this ioctl, namely zoned block devices.
 */
static int parse_zone_info(struct thread_data *td, struct fio_file *f)
{
        const unsigned int bufsz = sizeof(struct blk_zone_report) +
                4096 * sizeof(struct blk_zone);
        uint32_t nr_zones;
        struct blk_zone_report *hdr;
        const struct blk_zone *z;
        struct fio_zone_info *p;
        uint64_t zone_size, start_sector;
        struct zoned_block_device_info *zbd_info = NULL;
        pthread_mutexattr_t attr;
        void *buf;
        int fd, i, j, ret = 0;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutexattr_setpshared(&attr, true);

        buf = malloc(bufsz);
        if (!buf) {
                ret = -ENOMEM;
                goto out;
        }

        fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
        if (fd < 0) {
                ret = -errno;
                goto free;
        }

        ret = read_zone_info(fd, 0, buf, bufsz);
        if (ret < 0) {
                log_info("fio: BLKREPORTZONE(%lu) failed for %s (%d).\n",
                         0UL, f->file_name, -ret);
                goto close;
        }
        hdr = buf;
        if (hdr->nr_zones < 1) {
                log_info("fio: %s has invalid zone information.\n",
                         f->file_name);
                ret = -EINVAL;
                goto close;
        }
        z = (void *)(hdr + 1);
        zone_size = z->len << 9;
        nr_zones = (f->real_file_size + zone_size - 1) / zone_size;

        if (td->o.zone_size == 0) {
                td->o.zone_size = zone_size;
        } else if (td->o.zone_size != zone_size) {
                log_info("fio: %s job parameter zonesize %llu does not match disk zone size %llu.\n",
                         f->file_name, (unsigned long long) td->o.zone_size,
                         (unsigned long long) zone_size);
                ret = -EINVAL;
                goto close;
        }

        dprint(FD_ZBD, "Device %s has %d zones of size %llu KB\n", f->file_name,
               nr_zones, (unsigned long long) zone_size / 1024);

        zbd_info = scalloc(1, sizeof(*zbd_info) +
                           (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
        ret = -ENOMEM;
        if (!zbd_info)
                goto close;
        pthread_mutex_init(&zbd_info->mutex, &attr);
        zbd_info->refcount = 1;
        p = &zbd_info->zone_info[0];
        for (start_sector = 0, j = 0; j < nr_zones;) {
                z = (void *)(hdr + 1);
                for (i = 0; i < hdr->nr_zones; i++, j++, z++, p++) {
                        pthread_mutex_init(&p->mutex, &attr);
                        p->start = z->start << 9;
                        switch (z->cond) {
                        case BLK_ZONE_COND_NOT_WP:
                                p->wp = p->start;
                                break;
                        case BLK_ZONE_COND_FULL:
                                p->wp = p->start + zone_size;
                                break;
                        default:
                                assert(z->start <= z->wp);
                                assert(z->wp <= z->start + (zone_size >> 9));
                                p->wp = z->wp << 9;
                                break;
                        }
                        p->type = z->type;
                        p->cond = z->cond;
                        if (j > 0 && p->start != p[-1].start + zone_size) {
                                log_info("%s: invalid zone data\n",
                                         f->file_name);
                                ret = -EINVAL;
                                goto close;
                        }
                }
                z--;
                start_sector = z->start + z->len;
                if (j >= nr_zones)
                        break;
                ret = read_zone_info(fd, start_sector, buf, bufsz);
                if (ret < 0) {
                        log_info("fio: BLKREPORTZONE(%llu) failed for %s (%d).\n",
                                 (unsigned long long) start_sector, f->file_name, -ret);
                        goto close;
                }
        }
        /* a sentinel */
        zbd_info->zone_info[nr_zones].start = start_sector << 9;

        f->zbd_info = zbd_info;
        f->zbd_info->zone_size = zone_size;
        f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
                ilog2(zone_size) : -1;
        f->zbd_info->nr_zones = nr_zones;
        zbd_info = NULL;
        ret = 0;

close:
        sfree(zbd_info);
        close(fd);
free:
        free(buf);
out:
        pthread_mutexattr_destroy(&attr);
        return ret;
}

/*
 * Allocate zone information and store it into f->zbd_info if zonemode=zbd.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
{
        enum blk_zoned_model zbd_model;
        int ret = 0;

        assert(td->o.zone_mode == ZONE_MODE_ZBD);

        zbd_model = get_zbd_model(f->file_name);
        switch (zbd_model) {
        case ZBD_DM_HOST_AWARE:
        case ZBD_DM_HOST_MANAGED:
                ret = parse_zone_info(td, f);
                break;
        case ZBD_DM_NONE:
                ret = init_zone_info(td, f);
                break;
        }
        if (ret == 0)
                f->zbd_info->model = zbd_model;
        return ret;
}

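/*
 * Drop one reference to f->zbd_info and free it when the last reference is
 * gone.
 */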
void zbd_free_zone_info(struct fio_file *f)
{
        uint32_t refcount;

        if (!f->zbd_info)
                return;

        pthread_mutex_lock(&f->zbd_info->mutex);
        refcount = --f->zbd_info->refcount;
        pthread_mutex_unlock(&f->zbd_info->mutex);

        assert((int32_t)refcount >= 0);
        if (refcount == 0)
                sfree(f->zbd_info);
        f->zbd_info = NULL;
}

/*
 * Initialize f->zbd_info.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * Note: this function can only work correctly if it is called before the first
 * fio fork() call.
 */
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
        struct thread_data *td2;
        struct fio_file *f2;
        int i, j, ret;

        for_each_td(td2, i) {
                for_each_file(td2, f2, j) {
                        if (td2 == td && f2 == file)
                                continue;
                        if (!f2->zbd_info ||
                            strcmp(f2->file_name, file->file_name) != 0)
                                continue;
                        file->zbd_info = f2->zbd_info;
                        file->zbd_info->refcount++;
                        return 0;
                }
        }

        ret = zbd_create_zone_info(td, file);
        if (ret < 0)
                td_verror(td, -ret, "BLKREPORTZONE failed");
        return ret;
}

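/*
 * Validate the zonemode=zbd job options and set up zone information for all
 * block device files. An illustrative invocation for a regular (non-zoned)
 * block device, for which an explicit zone size is mandatory (device path is
 * a placeholder):
 *
 *   fio --name=test --filename=/dev/sdX --zonemode=zbd --zone_size=256M ...
 *
 * Zoned devices report their own zone size, so --zone_size may be omitted for
 * them.
 */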
int zbd_init(struct thread_data *td)
{
        struct fio_file *f;
        int i;

        for_each_file(td, f, i) {
                if (f->filetype != FIO_TYPE_BLOCK)
                        continue;
                if (td->o.zone_size && td->o.zone_size < 512) {
                        log_err("%s: zone size must be at least 512 bytes for --zonemode=zbd\n\n",
                                f->file_name);
                        return 1;
                }
                if (td->o.zone_size == 0 &&
                    get_zbd_model(f->file_name) == ZBD_DM_NONE) {
                        log_err("%s: Specifying the zone size is mandatory for regular block devices with --zonemode=zbd\n\n",
                                f->file_name);
                        return 1;
                }
                zbd_init_zone_info(td, f);
        }

        if (!zbd_using_direct_io()) {
                log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
                return 1;
        }

        if (!zbd_verify_sizes())
                return 1;

        if (!zbd_verify_bs())
                return 1;

        return 0;
}

/**
 * zbd_reset_range - reset zones for a range of bytes
 * @td: FIO thread data.
 * @f: Fio file for which to reset zones
 * @offset: Starting offset in bytes
 * @length: Length in bytes
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_range(struct thread_data *td, const struct fio_file *f,
                           uint64_t offset, uint64_t length)
{
        struct blk_zone_range zr = {
                .sector = offset >> 9,
                .nr_sectors = length >> 9,
        };
        uint32_t zone_idx_b, zone_idx_e;
        struct fio_zone_info *zb, *ze, *z;
        int ret = 0;

        assert(f->fd != -1);
        assert(is_valid_offset(f, offset + length - 1));
        switch (f->zbd_info->model) {
        case ZBD_DM_HOST_AWARE:
        case ZBD_DM_HOST_MANAGED:
                ret = ioctl(f->fd, BLKRESETZONE, &zr);
                if (ret < 0) {
                        td_verror(td, errno, "resetting wp failed");
                        log_err("%s: resetting wp for %llu sectors at sector %llu failed (%d).\n",
                                f->file_name, zr.nr_sectors, zr.sector, errno);
                        return ret;
                }
                break;
        case ZBD_DM_NONE:
                break;
        }

        zone_idx_b = zbd_zone_idx(f, offset);
        zb = &f->zbd_info->zone_info[zone_idx_b];
        zone_idx_e = zbd_zone_idx(f, offset + length);
        ze = &f->zbd_info->zone_info[zone_idx_e];
        for (z = zb; z < ze; z++) {
                pthread_mutex_lock(&z->mutex);
                pthread_mutex_lock(&f->zbd_info->mutex);
                f->zbd_info->sectors_with_data -= z->wp - z->start;
                pthread_mutex_unlock(&f->zbd_info->mutex);
                z->wp = z->start;
                z->verify_block = 0;
                pthread_mutex_unlock(&z->mutex);
        }

        td->ts.nr_zone_resets += ze - zb;

        return ret;
}

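/* Return the zone number of @zone within the zone array of @zbd_info. */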
static unsigned int zbd_zone_nr(struct zoned_block_device_info *zbd_info,
                                struct fio_zone_info *zone)
{
        return zone - zbd_info->zone_info;
}

/**
 * zbd_reset_zone - reset the write pointer of a single zone
 * @td: FIO thread data.
 * @f: FIO file associated with the disk for which to reset a write pointer.
 * @z: Zone to reset.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_zone(struct thread_data *td, const struct fio_file *f,
                          struct fio_zone_info *z)
{
        dprint(FD_ZBD, "%s: resetting wp of zone %u.\n", f->file_name,
               zbd_zone_nr(f->zbd_info, z));

        return zbd_reset_range(td, f, z->start, (z+1)->start - z->start);
}

/*
 * Reset a range of zones. Returns 0 upon success and 1 upon failure.
 * @td: fio thread data.
 * @f: fio file for which to reset zones
 * @zb: first zone to reset.
 * @ze: first zone not to reset.
 * @all_zones: whether to reset all zones or only those zones for which the
 *      write pointer is not a multiple of td->o.min_bs[DDIR_WRITE].
 */
static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
                           struct fio_zone_info *const zb,
                           struct fio_zone_info *const ze, bool all_zones)
{
        struct fio_zone_info *z, *start_z = ze;
        const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
        bool reset_wp;
        int res = 0;

        dprint(FD_ZBD, "%s: examining zones %u .. %u\n", f->file_name,
               zbd_zone_nr(f->zbd_info, zb), zbd_zone_nr(f->zbd_info, ze));
        assert(f->fd != -1);
        for (z = zb; z < ze; z++) {
                pthread_mutex_lock(&z->mutex);
                switch (z->type) {
                case BLK_ZONE_TYPE_SEQWRITE_REQ:
                        reset_wp = all_zones ? z->wp != z->start :
                                        (td->o.td_ddir & TD_DDIR_WRITE) &&
                                        z->wp % min_bs != 0;
                        if (start_z == ze && reset_wp) {
                                start_z = z;
                        } else if (start_z < ze && !reset_wp) {
                                dprint(FD_ZBD,
                                       "%s: resetting zones %u .. %u\n",
                                       f->file_name,
                                       zbd_zone_nr(f->zbd_info, start_z),
                                       zbd_zone_nr(f->zbd_info, z));
                                if (zbd_reset_range(td, f, start_z->start,
                                                z->start - start_z->start) < 0)
                                        res = 1;
                                start_z = ze;
                        }
                        break;
                default:
                        if (start_z == ze)
                                break;
                        dprint(FD_ZBD, "%s: resetting zones %u .. %u\n",
                               f->file_name, zbd_zone_nr(f->zbd_info, start_z),
                               zbd_zone_nr(f->zbd_info, z));
                        if (zbd_reset_range(td, f, start_z->start,
                                            z->start - start_z->start) < 0)
                                res = 1;
                        start_z = ze;
                        break;
                }
        }
        if (start_z < ze) {
                dprint(FD_ZBD, "%s: resetting zones %u .. %u\n", f->file_name,
                       zbd_zone_nr(f->zbd_info, start_z),
                       zbd_zone_nr(f->zbd_info, z));
                if (zbd_reset_range(td, f, start_z->start,
                                    z->start - start_z->start) < 0)
                        res = 1;
        }
        for (z = zb; z < ze; z++)
                pthread_mutex_unlock(&z->mutex);

        return res;
}

/*
 * Reset zbd_info.write_cnt, the counter that counts down towards the next
 * zone reset.
 */
static void zbd_reset_write_cnt(const struct thread_data *td,
                                const struct fio_file *f)
{
        assert(0 <= td->o.zrf.u.f && td->o.zrf.u.f <= 1);

        pthread_mutex_lock(&f->zbd_info->mutex);
        f->zbd_info->write_cnt = td->o.zrf.u.f ?
                min(1.0 / td->o.zrf.u.f, 0.0 + UINT_MAX) : UINT_MAX;
        pthread_mutex_unlock(&f->zbd_info->mutex);
}

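/*
 * Decrement zbd_info.write_cnt and rearm it from td->o.zrf when it reaches
 * zero. Returns true if and only if the counter reached zero, i.e. when the
 * next zone reset is due.
 */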
static bool zbd_dec_and_reset_write_cnt(const struct thread_data *td,
                                        const struct fio_file *f)
{
        uint32_t write_cnt = 0;

        pthread_mutex_lock(&f->zbd_info->mutex);
        assert(f->zbd_info->write_cnt);
        if (f->zbd_info->write_cnt)
                write_cnt = --f->zbd_info->write_cnt;
        if (write_cnt == 0)
                zbd_reset_write_cnt(td, f);
        pthread_mutex_unlock(&f->zbd_info->mutex);

        return write_cnt == 0;
}

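/*
 * Actions for zbd_process_swd(): verify or update
 * zbd_info.sectors_with_data.
 */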
enum swd_action {
        CHECK_SWD,
        SET_SWD,
};

/* Calculate the number of sectors with data (swd) and perform action 'a' */
static uint64_t zbd_process_swd(const struct fio_file *f, enum swd_action a)
{
        struct fio_zone_info *zb, *ze, *z;
        uint64_t swd = 0;

        zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
        ze = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset +
                                                  f->io_size)];
        for (z = zb; z < ze; z++) {
                pthread_mutex_lock(&z->mutex);
                swd += z->wp - z->start;
        }
        pthread_mutex_lock(&f->zbd_info->mutex);
        switch (a) {
        case CHECK_SWD:
                assert(f->zbd_info->sectors_with_data == swd);
                break;
        case SET_SWD:
                f->zbd_info->sectors_with_data = swd;
                break;
        }
        pthread_mutex_unlock(&f->zbd_info->mutex);
        for (z = zb; z < ze; z++)
                pthread_mutex_unlock(&z->mutex);

        return swd;
}

/*
 * The swd check is useful for debugging but is too slow to leave enabled all
 * the time. Hence it is disabled by default.
 */
static const bool enable_check_swd = false;

/* Check whether the value of zbd_info.sectors_with_data is correct. */
static void zbd_check_swd(const struct fio_file *f)
{
        if (!enable_check_swd)
                return;

        zbd_process_swd(f, CHECK_SWD);
}

static void zbd_init_swd(struct fio_file *f)
{
        uint64_t swd;

        swd = zbd_process_swd(f, SET_SWD);
        dprint(FD_ZBD, "%s(%s): swd = %" PRIu64 "\n", __func__, f->file_name,
               swd);
}

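/*
 * Prepare the zones of @f for a new run: recompute the number of sectors with
 * data, reset zones if needed (see the comment about data verification below)
 * and rearm the zone reset counter.
 */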
void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
        struct fio_zone_info *zb, *ze;
        uint32_t zone_idx_e;

        if (!f->zbd_info)
                return;

        zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
        zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size);
        ze = &f->zbd_info->zone_info[zone_idx_e];
        zbd_init_swd(f);
        /*
         * If data verification is enabled, reset the affected zones before
         * writing any data to avoid having to issue a zone reset while
         * writing data, which would cause data loss.
         */
        zbd_reset_zones(td, f, zb, ze, td->o.verify != VERIFY_NONE &&
                        (td->o.td_ddir & TD_DDIR_WRITE) &&
                        td->runstate != TD_VERIFYING);
        zbd_reset_write_cnt(td, f);
}

/* The caller must hold f->zbd_info->mutex. */
static bool is_zone_open(const struct thread_data *td, const struct fio_file *f,
                         unsigned int zone_idx)
{
        struct zoned_block_device_info *zbdi = f->zbd_info;
        int i;

        assert(td->o.max_open_zones <= ARRAY_SIZE(zbdi->open_zones));
        assert(zbdi->num_open_zones <= td->o.max_open_zones);

        for (i = 0; i < zbdi->num_open_zones; i++)
                if (zbdi->open_zones[i] == zone_idx)
                        return true;

        return false;
}

/*
 * Open a ZBD zone if it was not yet open. Returns true if either the zone was
 * already open or if opening a new zone is allowed. Returns false if the zone
 * was not yet open and opening a new zone would cause the zone limit to be
 * exceeded.
 */
static bool zbd_open_zone(struct thread_data *td, const struct io_u *io_u,
                          uint32_t zone_idx)
{
        const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
        const struct fio_file *f = io_u->file;
        struct fio_zone_info *z = &f->zbd_info->zone_info[zone_idx];
        bool res = true;

        if (z->cond == BLK_ZONE_COND_OFFLINE)
                return false;

        /*
         * Skip full zones with data verification enabled because resetting a
         * zone causes data loss and hence causes verification to fail.
         */
        if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
                return false;

        /* Zero means no limit */
        if (!td->o.max_open_zones)
                return true;

        pthread_mutex_lock(&f->zbd_info->mutex);
        if (is_zone_open(td, f, zone_idx))
                goto out;
        res = false;
        if (f->zbd_info->num_open_zones >= td->o.max_open_zones)
                goto out;
        dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
        f->zbd_info->open_zones[f->zbd_info->num_open_zones++] = zone_idx;
        z->open = 1;
        res = true;

out:
        pthread_mutex_unlock(&f->zbd_info->mutex);
        return res;
}

/* The caller must hold f->zbd_info->mutex */
static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
                           unsigned int open_zone_idx)
{
        uint32_t zone_idx;

        assert(open_zone_idx < f->zbd_info->num_open_zones);
        zone_idx = f->zbd_info->open_zones[open_zone_idx];
        memmove(f->zbd_info->open_zones + open_zone_idx,
                f->zbd_info->open_zones + open_zone_idx + 1,
                (FIO_MAX_OPEN_ZBD_ZONES - (open_zone_idx + 1)) *
                sizeof(f->zbd_info->open_zones[0]));
        f->zbd_info->num_open_zones--;
        f->zbd_info->zone_info[zone_idx].open = 0;
}

/*
 * Modify the offset of an I/O unit that does not refer to an open zone such
 * that it refers to an open zone. Close an open zone and open a new zone if
 * necessary. This algorithm can only work correctly if all write pointers are
 * a multiple of the fio block size. The caller must neither hold z->mutex
 * nor f->zbd_info->mutex. Returns with z->mutex held upon success.
 */
struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
                                               struct io_u *io_u)
{
        const uint32_t min_bs = td->o.min_bs[io_u->ddir];
        const struct fio_file *f = io_u->file;
        struct fio_zone_info *z;
        unsigned int open_zone_idx = -1;
        uint32_t zone_idx, new_zone_idx;
        int i;

        assert(is_valid_offset(f, io_u->offset));

        if (td->o.max_open_zones) {
                /*
                 * This statement accesses f->zbd_info->open_zones[] on purpose
                 * without locking.
                 */
                zone_idx = f->zbd_info->open_zones[(io_u->offset -
                                                    f->file_offset) *
                                f->zbd_info->num_open_zones / f->io_size];
        } else {
                zone_idx = zbd_zone_idx(f, io_u->offset);
        }
        dprint(FD_ZBD, "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
               __func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);

        /*
         * Since z->mutex is the outer lock and f->zbd_info->mutex the inner
         * lock it can happen that the state of the zone with index zone_idx
         * has changed after 'z' has been assigned and before f->zbd_info->mutex
         * has been obtained. Hence the loop.
         */
        for (;;) {
                z = &f->zbd_info->zone_info[zone_idx];

                pthread_mutex_lock(&z->mutex);
                pthread_mutex_lock(&f->zbd_info->mutex);
                if (td->o.max_open_zones == 0)
                        goto examine_zone;
                if (f->zbd_info->num_open_zones == 0) {
                        pthread_mutex_unlock(&f->zbd_info->mutex);
                        pthread_mutex_unlock(&z->mutex);
                        dprint(FD_ZBD, "%s(%s): no zones are open\n",
                               __func__, f->file_name);
                        return NULL;
                }
                open_zone_idx = (io_u->offset - f->file_offset) *
                        f->zbd_info->num_open_zones / f->io_size;
                assert(open_zone_idx < f->zbd_info->num_open_zones);
                new_zone_idx = f->zbd_info->open_zones[open_zone_idx];
                if (new_zone_idx == zone_idx)
                        break;
                zone_idx = new_zone_idx;
                pthread_mutex_unlock(&f->zbd_info->mutex);
                pthread_mutex_unlock(&z->mutex);
        }

        /* Both z->mutex and f->zbd_info->mutex are held. */

examine_zone:
        if (z->wp + min_bs <= (z+1)->start) {
                pthread_mutex_unlock(&f->zbd_info->mutex);
                goto out;
        }
        dprint(FD_ZBD, "%s(%s): closing zone %d\n", __func__, f->file_name,
               zone_idx);
        if (td->o.max_open_zones)
                zbd_close_zone(td, f, open_zone_idx);
        pthread_mutex_unlock(&f->zbd_info->mutex);

        /* Only z->mutex is held. */

        /* Zone 'z' is full, so try to open a new zone. */
        for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
                zone_idx++;
                pthread_mutex_unlock(&z->mutex);
                z++;
                if (!is_valid_offset(f, z->start)) {
                        /* Wrap-around. */
                        zone_idx = zbd_zone_idx(f, f->file_offset);
                        z = &f->zbd_info->zone_info[zone_idx];
                }
                assert(is_valid_offset(f, z->start));
                pthread_mutex_lock(&z->mutex);
                if (z->open)
                        continue;
                if (zbd_open_zone(td, io_u, zone_idx))
                        goto out;
        }

        /* Only z->mutex is held. */

        /* Check whether the write fits in any of the already opened zones. */
        pthread_mutex_lock(&f->zbd_info->mutex);
        for (i = 0; i < f->zbd_info->num_open_zones; i++) {
                zone_idx = f->zbd_info->open_zones[i];
                pthread_mutex_unlock(&f->zbd_info->mutex);
                pthread_mutex_unlock(&z->mutex);

                z = &f->zbd_info->zone_info[zone_idx];

                pthread_mutex_lock(&z->mutex);
                if (z->wp + min_bs <= (z+1)->start)
                        goto out;
                pthread_mutex_lock(&f->zbd_info->mutex);
        }
        pthread_mutex_unlock(&f->zbd_info->mutex);
        pthread_mutex_unlock(&z->mutex);
        dprint(FD_ZBD, "%s(%s): did not open another zone\n", __func__,
               f->file_name);
        return NULL;

out:
        dprint(FD_ZBD, "%s(%s): returning zone %d\n", __func__, f->file_name,
               zone_idx);
        io_u->offset = z->start;
        return z;
}

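/*
 * For reads issued while verifying, replay the writes in the order in which
 * they were issued: each call maps io_u to the next min_bs-sized block of
 * zone @z, switching to an open zone via zbd_convert_to_open_zone() if @z
 * cannot be opened.
 */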
/* The caller must hold z->mutex. */
static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
                                                    struct io_u *io_u,
                                                    struct fio_zone_info *z)
{
        const struct fio_file *f = io_u->file;
        const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];

        if (!zbd_open_zone(td, io_u, z - f->zbd_info->zone_info)) {
                pthread_mutex_unlock(&z->mutex);
                z = zbd_convert_to_open_zone(td, io_u);
                assert(z);
        }

        if (z->verify_block * min_bs >= f->zbd_info->zone_size)
                log_err("%s: %d * %d >= %llu\n", f->file_name, z->verify_block,
                        min_bs, (unsigned long long) f->zbd_info->zone_size);
        io_u->offset = z->start + z->verify_block++ * min_bs;
        return z;
}

/*
 * Find another zone for which @io_u fits below the write pointer. Start
 * searching in zones @zb + 1 .. @zl and continue searching in zones
 * @zf .. @zb - 1.
 *
 * Either returns NULL or returns a zone pointer and holds the mutex for that
 * zone.
 */
static struct fio_zone_info *
zbd_find_zone(struct thread_data *td, struct io_u *io_u,
              struct fio_zone_info *zb, struct fio_zone_info *zl)
{
        const uint32_t min_bs = td->o.min_bs[io_u->ddir];
        const struct fio_file *f = io_u->file;
        struct fio_zone_info *z1, *z2;
        const struct fio_zone_info *const zf =
                &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];

        /*
         * Skip to the next non-empty zone in case of sequential I/O and to
         * the nearest non-empty zone in case of random I/O.
         */
        for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
                if (z1 < zl && z1->cond != BLK_ZONE_COND_OFFLINE) {
                        pthread_mutex_lock(&z1->mutex);
                        if (z1->start + min_bs <= z1->wp)
                                return z1;
                        pthread_mutex_unlock(&z1->mutex);
                } else if (!td_random(td)) {
                        break;
                }
                if (td_random(td) && z2 >= zf &&
                    z2->cond != BLK_ZONE_COND_OFFLINE) {
                        pthread_mutex_lock(&z2->mutex);
                        if (z2->start + min_bs <= z2->wp)
                                return z2;
                        pthread_mutex_unlock(&z2->mutex);
                }
        }
        dprint(FD_ZBD, "%s: adjusting random read offset failed\n",
               f->file_name);
        return NULL;
}

/**
 * zbd_queue_io - update the write pointer of a sequential zone
 * @io_u: I/O unit
 * @q: queueing status (busy, completed or queued).
 * @success: Whether or not the I/O unit has been queued successfully
 *
 * For write and trim operations, update the write pointer of the I/O unit
 * target zone.
 */
static void zbd_queue_io(struct io_u *io_u, int q, bool success)
{
        const struct fio_file *f = io_u->file;
        struct zoned_block_device_info *zbd_info = f->zbd_info;
        struct fio_zone_info *z;
        uint32_t zone_idx;
        uint64_t zone_end;

        if (!zbd_info)
                return;

        zone_idx = zbd_zone_idx(f, io_u->offset);
        assert(zone_idx < zbd_info->nr_zones);
        z = &zbd_info->zone_info[zone_idx];

        if (z->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
                return;

        if (!success)
                goto unlock;

        dprint(FD_ZBD,
               "%s: queued I/O (%lld, %llu) for zone %u\n",
               f->file_name, io_u->offset, io_u->buflen, zone_idx);

        switch (io_u->ddir) {
        case DDIR_WRITE:
                zone_end = min((uint64_t)(io_u->offset + io_u->buflen),
                               (z + 1)->start);
                pthread_mutex_lock(&zbd_info->mutex);
                /*
                 * z->wp > zone_end means that one or more I/O errors
                 * have occurred.
                 */
                if (z->wp <= zone_end)
                        zbd_info->sectors_with_data += zone_end - z->wp;
                pthread_mutex_unlock(&zbd_info->mutex);
                z->wp = zone_end;
                break;
        case DDIR_TRIM:
                assert(z->wp == z->start);
                break;
        default:
                break;
        }

unlock:
        if (!success || q != FIO_Q_QUEUED) {
                /* BUSY or COMPLETED: unlock the zone */
                pthread_mutex_unlock(&z->mutex);
                io_u->zbd_put_io = NULL;
        }
}

/**
 * zbd_put_io - Unlock an I/O unit target zone lock
 * @io_u: I/O unit
 */
static void zbd_put_io(const struct io_u *io_u)
{
        const struct fio_file *f = io_u->file;
        struct zoned_block_device_info *zbd_info = f->zbd_info;
        struct fio_zone_info *z;
        uint32_t zone_idx;

        if (!zbd_info)
                return;

        zone_idx = zbd_zone_idx(f, io_u->offset);
        assert(zone_idx < zbd_info->nr_zones);
        z = &zbd_info->zone_info[zone_idx];

        if (z->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
                return;

        dprint(FD_ZBD,
               "%s: terminate I/O (%lld, %llu) for zone %u\n",
               f->file_name, io_u->offset, io_u->buflen, zone_idx);

        assert(pthread_mutex_unlock(&z->mutex) == 0);
        zbd_check_swd(f);
}

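/*
 * Check whether an error code may indicate an unaligned write to a sequential
 * zone (EIO or EREMOTEIO).
 */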
bool zbd_unaligned_write(int error_code)
{
        switch (error_code) {
        case EIO:
        case EREMOTEIO:
                return true;
        }
        return false;
}

/**
 * zbd_adjust_block - adjust the offset and length as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * Locking strategy: returns with z->mutex locked if and only if z refers
 * to a sequential zone and if io_u_accept is returned. z is the zone that
 * corresponds to io_u->offset at the end of this function.
 */
enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
{
        const struct fio_file *f = io_u->file;
        uint32_t zone_idx_b;
        struct fio_zone_info *zb, *zl, *orig_zb;
        uint32_t orig_len = io_u->buflen;
        uint32_t min_bs = td->o.min_bs[io_u->ddir];
        uint64_t new_len;
        int64_t range;

        if (!f->zbd_info)
                return io_u_accept;

        assert(is_valid_offset(f, io_u->offset));
        assert(io_u->buflen);
        zone_idx_b = zbd_zone_idx(f, io_u->offset);
        zb = &f->zbd_info->zone_info[zone_idx_b];
        orig_zb = zb;

        /* Accept the I/O offset for conventional zones. */
        if (zb->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return io_u_accept;

        /*
         * Accept the I/O offset for reads if reading beyond the write pointer
         * is enabled.
         */
        if (zb->cond != BLK_ZONE_COND_OFFLINE &&
            io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
                return io_u_accept;

        zbd_check_swd(f);

        /*
         * Lock the io_u target zone. The zone will be unlocked if io_u offset
         * is changed or when io_u completes and zbd_put_io() is executed.
         * To prevent multiple jobs doing asynchronous I/O from deadlocking
         * each other waiting for zone locks while building an io_u batch,
         * first only trylock the zone. If the zone is already locked by
         * another job, process the currently queued I/Os so that I/O progress
         * is made and zones are unlocked.
         */
        if (pthread_mutex_trylock(&zb->mutex) != 0) {
                if (!td_ioengine_flagged(td, FIO_SYNCIO))
                        io_u_quiesce(td);
                pthread_mutex_lock(&zb->mutex);
        }

        switch (io_u->ddir) {
        case DDIR_READ:
                if (td->runstate == TD_VERIFYING) {
                        zb = zbd_replay_write_order(td, io_u, zb);
                        goto accept;
                }
                /*
                 * Check that there is enough written data in the zone to do an
                 * I/O of at least min_bs B. If there isn't, find a new zone for
                 * the I/O.
                 */
                range = zb->cond != BLK_ZONE_COND_OFFLINE ?
                        zb->wp - zb->start : 0;
                if (range < min_bs ||
                    ((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
                        pthread_mutex_unlock(&zb->mutex);
                        zl = &f->zbd_info->zone_info[zbd_zone_idx(f,
                                                f->file_offset + f->io_size)];
                        zb = zbd_find_zone(td, io_u, zb, zl);
                        if (!zb) {
                                dprint(FD_ZBD,
                                       "%s: zbd_find_zone(%lld, %llu) failed\n",
                                       f->file_name, io_u->offset,
                                       io_u->buflen);
                                goto eof;
                        }
                        /*
                         * zbd_find_zone() returned a zone with a range of at
                         * least min_bs.
                         */
                        range = zb->wp - zb->start;
                        assert(range >= min_bs);

                        if (!td_random(td))
                                io_u->offset = zb->start;
                }
                /*
                 * Make sure the I/O is within the zone valid data range while
                 * maximizing the I/O size and preserving randomness.
                 */
                if (range <= io_u->buflen)
                        io_u->offset = zb->start;
                else if (td_random(td))
                        io_u->offset = zb->start +
                                ((io_u->offset - orig_zb->start) %
                                 (range - io_u->buflen)) / min_bs * min_bs;
                /*
                 * Make sure the I/O does not cross over the zone wp position.
                 */
                new_len = min((unsigned long long)io_u->buflen,
                              (unsigned long long)(zb->wp - io_u->offset));
                new_len = new_len / min_bs * min_bs;
                if (new_len < io_u->buflen) {
                        io_u->buflen = new_len;
                        dprint(FD_IO, "Changed length from %u into %llu\n",
                               orig_len, io_u->buflen);
                }
                assert(zb->start <= io_u->offset);
                assert(io_u->offset + io_u->buflen <= zb->wp);
                goto accept;
        case DDIR_WRITE:
                if (io_u->buflen > f->zbd_info->zone_size)
                        goto eof;
                if (!zbd_open_zone(td, io_u, zone_idx_b)) {
                        pthread_mutex_unlock(&zb->mutex);
                        zb = zbd_convert_to_open_zone(td, io_u);
                        if (!zb)
                                goto eof;
                        zone_idx_b = zb - f->zbd_info->zone_info;
                }
                /* Check whether the zone reset threshold has been exceeded */
                if (td->o.zrf.u.f) {
                        if (f->zbd_info->sectors_with_data >=
                            f->io_size * td->o.zrt.u.f &&
                            zbd_dec_and_reset_write_cnt(td, f)) {
                                zb->reset_zone = 1;
                        }
                }
                /* Reset the zone pointer if necessary */
                if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
                        assert(td->o.verify == VERIFY_NONE);
                        /*
                         * Since previous write requests may have been submitted
                         * asynchronously and since we will submit the zone
                         * reset synchronously, wait until previously submitted
                         * write requests have completed before issuing a
                         * zone reset.
                         */
                        io_u_quiesce(td);
                        zb->reset_zone = 0;
                        if (zbd_reset_zone(td, f, zb) < 0)
                                goto eof;
                }
                /* Make writes occur at the write pointer */
                assert(!zbd_zone_full(f, zb, min_bs));
                io_u->offset = zb->wp;
                if (!is_valid_offset(f, io_u->offset)) {
                        dprint(FD_ZBD, "Dropped request with offset %llu\n",
                               io_u->offset);
                        goto eof;
                }
                /*
                 * Make sure that the buflen is a multiple of the minimal
                 * block size. Give up if shrinking would make the request too
                 * small.
                 */
                new_len = min((unsigned long long)io_u->buflen,
                              (zb + 1)->start - io_u->offset);
                new_len = new_len / min_bs * min_bs;
                if (new_len == io_u->buflen)
                        goto accept;
                if (new_len >= min_bs) {
                        io_u->buflen = new_len;
                        dprint(FD_IO, "Changed length from %u into %llu\n",
                               orig_len, io_u->buflen);
                        goto accept;
                }
                log_err("Zone remainder %lld smaller than minimum block size %d\n",
                        ((zb + 1)->start - io_u->offset),
                        min_bs);
                goto eof;
        case DDIR_TRIM:
                /* fall-through */
        case DDIR_SYNC:
        case DDIR_DATASYNC:
        case DDIR_SYNC_FILE_RANGE:
        case DDIR_WAIT:
        case DDIR_LAST:
        case DDIR_INVAL:
                goto accept;
        }

        assert(false);

accept:
        assert(zb);
        assert(zb->cond != BLK_ZONE_COND_OFFLINE);
        assert(!io_u->zbd_queue_io);
        assert(!io_u->zbd_put_io);
        io_u->zbd_queue_io = zbd_queue_io;
        io_u->zbd_put_io = zbd_put_io;
        return io_u_accept;

eof:
        if (zb)
                pthread_mutex_unlock(&zb->mutex);
        return io_u_eof;
}

/* Return a string with ZBD statistics */
char *zbd_write_status(const struct thread_stat *ts)
{
        char *res;

        if (asprintf(&res, "; %llu zone resets", (unsigned long long) ts->nr_zone_resets) < 0)
                return NULL;
        return res;
}