[fio.git] / zbd.c

/*
 * Copyright (C) 2018 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/blkzoned.h>
#include "file.h"
#include "fio.h"
#include "lib/pow2.h"
#include "log.h"
#include "smalloc.h"
#include "verify.h"
#include "zbd.h"

/**
 * zbd_zone_idx - convert an offset into a zone number
 * @f: file pointer.
 * @offset: offset in bytes. If this offset is in the first zone_size bytes
 * past the disk size then the index of the sentinel is returned.
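 *
 * Note: zone_size is stored in units of 512-byte sectors. As an
 * illustration (not a value taken from this file), a 256 MiB zone gives
 * zone_size = 524288 sectors and zone_size_log2 = 19 + 9 = 28, so a byte
 * offset of 1 GiB maps to zone index (1 << 30) >> 28 = 4.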
 */
static uint32_t zbd_zone_idx(const struct fio_file *f, uint64_t offset)
{
	uint32_t zone_idx;

	if (f->zbd_info->zone_size_log2)
		zone_idx = offset >> f->zbd_info->zone_size_log2;
	else
		zone_idx = (offset >> 9) / f->zbd_info->zone_size;

	return min(zone_idx, f->zbd_info->nr_zones);
}

/**
 * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
 * @f: file pointer.
 * @z: zone info pointer.
 * @required: minimum number of bytes that must remain in a zone.
 *
 * The caller must hold z->mutex.
 */
static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
			  uint64_t required)
{
	assert((required & 511) == 0);

	return z->type == BLK_ZONE_TYPE_SEQWRITE_REQ &&
		z->wp + (required >> 9) > z->start + f->zbd_info->zone_size;
}

static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
{
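	/*
	 * A single unsigned comparison covers both bounds: offsets below
	 * f->file_offset wrap around to a huge value and are rejected.
	 */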
	return (uint64_t)(offset - f->file_offset) < f->io_size;
}

/* Verify whether direct I/O is used for all host-managed zoned drives. */
static bool zbd_using_direct_io(void)
{
	struct thread_data *td;
	struct fio_file *f;
	int i, j;

	for_each_td(td, i) {
		if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
			continue;
		for_each_file(td, f, j) {
			if (f->zbd_info &&
			    f->zbd_info->model == ZBD_DM_HOST_MANAGED)
				return false;
		}
	}

	return true;
}

/* Whether or not the I/O range for f includes one or more sequential zones */
static bool zbd_is_seq_job(struct fio_file *f)
{
	uint32_t zone_idx, zone_idx_b, zone_idx_e;

	assert(f->zbd_info);
	if (f->io_size == 0)
		return false;
	zone_idx_b = zbd_zone_idx(f, f->file_offset);
	zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size - 1);
	for (zone_idx = zone_idx_b; zone_idx <= zone_idx_e; zone_idx++)
		if (f->zbd_info->zone_info[zone_idx].type ==
		    BLK_ZONE_TYPE_SEQWRITE_REQ)
			return true;

	return false;
}

/*
 * Verify whether the offset and size parameters are aligned with zone
 * boundaries, and round them to zone boundaries if they are not.
 */
static bool zbd_verify_sizes(void)
{
	const struct fio_zone_info *z;
	struct thread_data *td;
	struct fio_file *f;
	uint64_t new_offset, new_end;
	uint32_t zone_idx;
	int i, j;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			if (!f->zbd_info)
				continue;
			if (f->file_offset >= f->real_file_size)
				continue;
			if (!zbd_is_seq_job(f))
				continue;
			zone_idx = zbd_zone_idx(f, f->file_offset);
			z = &f->zbd_info->zone_info[zone_idx];
			if (f->file_offset != (z->start << 9)) {
				new_offset = (z+1)->start << 9;
				if (new_offset >= f->file_offset + f->io_size) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded up offset from %lu to %lu\n",
					 f->file_name, f->file_offset,
					 new_offset);
				f->io_size -= (new_offset - f->file_offset);
				f->file_offset = new_offset;
			}
			zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
			z = &f->zbd_info->zone_info[zone_idx];
			new_end = z->start << 9;
			if (f->file_offset + f->io_size != new_end) {
				if (new_end <= f->file_offset) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded down io_size from %lu to %lu\n",
					 f->file_name, f->io_size,
					 new_end - f->file_offset);
				f->io_size = new_end - f->file_offset;
			}
		}
	}

	return true;
}

static bool zbd_verify_bs(void)
{
	struct thread_data *td;
	struct fio_file *f;
	uint32_t zone_size;
	int i, j, k;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			if (!f->zbd_info)
				continue;
			zone_size = f->zbd_info->zone_size;
			for (k = 0; k < ARRAY_SIZE(td->o.bs); k++) {
				if (td->o.verify != VERIFY_NONE &&
				    (zone_size << 9) % td->o.bs[k] != 0) {
					log_info("%s: block size %llu is not a divisor of the zone size %u\n",
						 f->file_name, td->o.bs[k],
						 zone_size << 9);
					return false;
				}
			}
		}
	}
	return true;
}

/*
 * Read zone information into @buf starting from sector @start_sector.
 * @fd is a file descriptor that refers to a block device and @bufsz is the
 * size of @buf.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int read_zone_info(int fd, uint64_t start_sector,
			  void *buf, unsigned int bufsz)
{
	struct blk_zone_report *hdr = buf;

	if (bufsz < sizeof(*hdr))
		return -EINVAL;

	memset(hdr, 0, sizeof(*hdr));

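	/*
	 * nr_zones is an in/out field for BLKREPORTZONE: on input it holds
	 * the capacity of the zone array that follows the header; on output
	 * the kernel stores the number of zones it actually reported.
	 */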
	hdr->nr_zones = (bufsz - sizeof(*hdr)) / sizeof(struct blk_zone);
	hdr->sector = start_sector;
	return ioctl(fd, BLKREPORTZONE, hdr) >= 0 ? 0 : -errno;
}

/*
 * Read up to 255 characters from the first line of a file. Strip the trailing
 * newline.
 */
static char *read_file(const char *path)
{
	char line[256], *p = line;
	FILE *f;

	f = fopen(path, "rb");
	if (!f)
		return NULL;
	if (!fgets(line, sizeof(line), f))
		line[0] = '\0';
	strsep(&p, "\n");
	fclose(f);

	return strdup(line);
}

static enum blk_zoned_model get_zbd_model(const char *file_name)
{
	enum blk_zoned_model model = ZBD_DM_NONE;
	char *zoned_attr_path = NULL;
	char *model_str = NULL;
	struct stat statbuf;

	if (stat(file_name, &statbuf) < 0)
		goto out;
	if (asprintf(&zoned_attr_path, "/sys/dev/block/%d:%d/queue/zoned",
		     major(statbuf.st_rdev), minor(statbuf.st_rdev)) < 0)
		goto out;
	model_str = read_file(zoned_attr_path);
	if (!model_str)
		goto out;
	dprint(FD_ZBD, "%s: zbd model string: %s\n", file_name, model_str);
	if (strcmp(model_str, "host-aware") == 0)
		model = ZBD_DM_HOST_AWARE;
	else if (strcmp(model_str, "host-managed") == 0)
		model = ZBD_DM_HOST_MANAGED;

out:
	free(model_str);
	free(zoned_attr_path);
	return model;
}

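/* Return floor(log2(i)); returns -1 when i == 0. */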
static int ilog2(uint64_t i)
{
	int log = -1;

	while (i) {
		i >>= 1;
		log++;
	}
	return log;
}

/*
 * Initialize f->zbd_info for devices that are not zoned block devices. This
 * allows executing a ZBD workload against a non-ZBD device.
 */
static int init_zone_info(struct thread_data *td, struct fio_file *f)
{
	uint32_t nr_zones;
	struct fio_zone_info *p;
	uint64_t zone_size;
	struct zoned_block_device_info *zbd_info = NULL;
	pthread_mutexattr_t attr;
	int i;

	zone_size = td->o.zone_size >> 9;
	assert(zone_size);
	nr_zones = ((f->real_file_size >> 9) + zone_size - 1) / zone_size;
	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	if (!zbd_info)
		return -ENOMEM;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setpshared(&attr, true);
	pthread_mutex_init(&zbd_info->mutex, &attr);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (i = 0; i < nr_zones; i++, p++) {
		pthread_mutex_init(&p->mutex, &attr);
		p->start = i * zone_size;
		p->wp = p->start + zone_size;
		p->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		p->cond = BLK_ZONE_COND_EMPTY;
	}
	/* a sentinel: its start marks the end of the last real zone */
	p->start = nr_zones * zone_size;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) + 9 : -1;
	f->zbd_info->nr_zones = nr_zones;
	pthread_mutexattr_destroy(&attr);
	return 0;
}

/*
 * Parse the BLKREPORTZONE output and store it in f->zbd_info. Must be called
 * only for devices that support this ioctl, namely zoned block devices.
 */
static int parse_zone_info(struct thread_data *td, struct fio_file *f)
{
	const unsigned int bufsz = sizeof(struct blk_zone_report) +
		4096 * sizeof(struct blk_zone);
	uint32_t nr_zones;
	struct blk_zone_report *hdr;
	const struct blk_zone *z;
	struct fio_zone_info *p;
	uint64_t zone_size, start_sector;
	struct zoned_block_device_info *zbd_info = NULL;
	pthread_mutexattr_t attr;
	void *buf;
	int fd, i, j, ret = 0;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setpshared(&attr, true);

	buf = malloc(bufsz);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	fd = open(f->file_name, O_RDONLY | O_LARGEFILE);
	if (fd < 0) {
		ret = -errno;
		goto free;
	}

	ret = read_zone_info(fd, 0, buf, bufsz);
	if (ret < 0) {
		log_info("fio: BLKREPORTZONE(%lu) failed for %s (%d).\n",
			 0UL, f->file_name, -ret);
		goto close;
	}
	hdr = buf;
	if (hdr->nr_zones < 1) {
		log_info("fio: %s has invalid zone information.\n",
			 f->file_name);
		ret = -EINVAL;
		goto close;
	}
	z = (void *)(hdr + 1);
	zone_size = z->len;
	nr_zones = ((f->real_file_size >> 9) + zone_size - 1) / zone_size;

	if (td->o.zone_size == 0) {
		td->o.zone_size = zone_size << 9;
	} else if (td->o.zone_size != zone_size << 9) {
		log_info("fio: %s job parameter zonesize %lld does not match disk zone size %ld.\n",
			 f->file_name, td->o.zone_size, zone_size << 9);
		ret = -EINVAL;
		goto close;
	}

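	/* zone_size is in 512-byte sectors, hence the division by two to print KB */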
	dprint(FD_ZBD, "Device %s has %d zones of size %lu KB\n", f->file_name,
	       nr_zones, zone_size / 2);

	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	ret = -ENOMEM;
	if (!zbd_info)
		goto close;
	pthread_mutex_init(&zbd_info->mutex, &attr);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
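	/*
	 * Each BLKREPORTZONE call reports at most 4096 zones (the buffer
	 * capacity chosen above), so loop until all nr_zones have been
	 * translated into fio_zone_info entries.
	 */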
	for (start_sector = 0, j = 0; j < nr_zones;) {
		z = (void *)(hdr + 1);
		for (i = 0; i < hdr->nr_zones; i++, j++, z++, p++) {
			pthread_mutex_init(&p->mutex, &attr);
			p->start = z->start;
			switch (z->cond) {
			case BLK_ZONE_COND_NOT_WP:
				p->wp = z->start;
				break;
			case BLK_ZONE_COND_FULL:
				p->wp = z->start + zone_size;
				break;
			default:
				assert(z->start <= z->wp);
				assert(z->wp <= z->start + zone_size);
				p->wp = z->wp;
				break;
			}
			p->type = z->type;
			p->cond = z->cond;
			if (j > 0 && p->start != p[-1].start + zone_size) {
				log_info("%s: invalid zone data\n",
					 f->file_name);
				ret = -EINVAL;
				goto close;
			}
		}
		z--;
		start_sector = z->start + z->len;
		if (j >= nr_zones)
			break;
		ret = read_zone_info(fd, start_sector, buf, bufsz);
		if (ret < 0) {
			log_info("fio: BLKREPORTZONE(%lu) failed for %s (%d).\n",
				 start_sector, f->file_name, -ret);
			goto close;
		}
	}
	/* a sentinel: its start marks the end of the last real zone */
	zbd_info->zone_info[nr_zones].start = start_sector;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) + 9 : -1;
	f->zbd_info->nr_zones = nr_zones;
	zbd_info = NULL;
	ret = 0;

close:
	sfree(zbd_info);
	close(fd);
free:
	free(buf);
out:
	pthread_mutexattr_destroy(&attr);
	return ret;
}

/*
 * Allocate zone information and store it into f->zbd_info if zonemode=zbd.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
{
	enum blk_zoned_model zbd_model;
	int ret = 0;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);

	zbd_model = get_zbd_model(f->file_name);
	switch (zbd_model) {
	case ZBD_DM_HOST_AWARE:
	case ZBD_DM_HOST_MANAGED:
		ret = parse_zone_info(td, f);
		break;
	case ZBD_DM_NONE:
		ret = init_zone_info(td, f);
		break;
	}
	if (ret == 0)
		f->zbd_info->model = zbd_model;
	return ret;
}

void zbd_free_zone_info(struct fio_file *f)
{
	uint32_t refcount;

	if (!f->zbd_info)
		return;

	pthread_mutex_lock(&f->zbd_info->mutex);
	refcount = --f->zbd_info->refcount;
	pthread_mutex_unlock(&f->zbd_info->mutex);

	assert((int32_t)refcount >= 0);
	if (refcount == 0)
		sfree(f->zbd_info);
	f->zbd_info = NULL;
}

/*
 * Initialize f->zbd_info.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * Note: this function can only work correctly if it is called before the first
 * fio fork() call.
 */
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
	struct thread_data *td2;
	struct fio_file *f2;
	int i, j, ret;

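	/*
	 * If any other job already has zone information for the same file,
	 * share that information instead of querying the device again.
	 */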
	for_each_td(td2, i) {
		for_each_file(td2, f2, j) {
			if (td2 == td && f2 == file)
				continue;
			if (!f2->zbd_info ||
			    strcmp(f2->file_name, file->file_name) != 0)
				continue;
			file->zbd_info = f2->zbd_info;
			file->zbd_info->refcount++;
			return 0;
		}
	}

	ret = zbd_create_zone_info(td, file);
	if (ret < 0)
		td_verror(td, -ret, "BLKREPORTZONE failed");
	return ret;
}

int zbd_init(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	for_each_file(td, f, i) {
		if (f->filetype != FIO_TYPE_BLOCK)
			continue;
		if (td->o.zone_size && td->o.zone_size < 512) {
			log_err("%s: zone size must be at least 512 bytes for --zonemode=zbd\n\n",
				f->file_name);
			return 1;
		}
		if (td->o.zone_size == 0 &&
		    get_zbd_model(f->file_name) == ZBD_DM_NONE) {
			log_err("%s: Specifying the zone size is mandatory for regular block devices with --zonemode=zbd\n\n",
				f->file_name);
			return 1;
		}
		if (zbd_init_zone_info(td, f) < 0)
			return 1;
	}

	if (!zbd_using_direct_io()) {
		log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
		return 1;
	}

	if (!zbd_verify_sizes())
		return 1;

	if (!zbd_verify_bs())
		return 1;

	return 0;
}

/**
 * zbd_reset_range - reset zones for a range of sectors
 * @td: FIO thread data.
 * @f: Fio file for which to reset zones
 * @sector: Starting sector in units of 512 bytes
 * @nr_sectors: Number of sectors in units of 512 bytes
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_range(struct thread_data *td, const struct fio_file *f,
			   uint64_t sector, uint64_t nr_sectors)
{
	struct blk_zone_range zr = {
		.sector = sector,
		.nr_sectors = nr_sectors,
	};
	uint32_t zone_idx_b, zone_idx_e;
	struct fio_zone_info *zb, *ze, *z;
	int ret = 0;

	assert(f->fd != -1);
	assert(is_valid_offset(f, ((sector + nr_sectors) << 9) - 1));
	switch (f->zbd_info->model) {
	case ZBD_DM_HOST_AWARE:
	case ZBD_DM_HOST_MANAGED:
		ret = ioctl(f->fd, BLKRESETZONE, &zr);
		if (ret < 0) {
			td_verror(td, errno, "resetting wp failed");
			log_err("%s: resetting wp for %llu sectors at sector %llu failed (%d).\n",
				f->file_name, zr.nr_sectors, zr.sector, errno);
			return ret;
		}
		break;
	case ZBD_DM_NONE:
		break;
	}

	zone_idx_b = zbd_zone_idx(f, sector << 9);
	zb = &f->zbd_info->zone_info[zone_idx_b];
	zone_idx_e = zbd_zone_idx(f, (sector + nr_sectors) << 9);
	ze = &f->zbd_info->zone_info[zone_idx_e];
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		pthread_mutex_lock(&f->zbd_info->mutex);
		f->zbd_info->sectors_with_data -= z->wp - z->start;
		pthread_mutex_unlock(&f->zbd_info->mutex);
		z->wp = z->start;
		z->verify_block = 0;
		pthread_mutex_unlock(&z->mutex);
	}

	td->ts.nr_zone_resets += ze - zb;

	return ret;
}

/**
 * zbd_reset_zone - reset the write pointer of a single zone
 * @td: FIO thread data.
 * @f: FIO file associated with the disk for which to reset a write pointer.
 * @z: Zone to reset.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_zone(struct thread_data *td, const struct fio_file *f,
			  struct fio_zone_info *z)
{
	int ret;

	dprint(FD_ZBD, "%s: resetting wp of zone %lu.\n", f->file_name,
	       z - f->zbd_info->zone_info);
	ret = zbd_reset_range(td, f, z->start, (z+1)->start - z->start);
	return ret;
}

/*
 * Reset a range of zones. Returns 0 upon success and 1 upon failure.
 * @td: fio thread data.
 * @f: fio file for which to reset zones
 * @zb: first zone to reset.
 * @ze: first zone not to reset.
 * @all_zones: whether to reset all zones or only those zones for which the
 *	write pointer is not a multiple of td->o.min_bs[DDIR_WRITE].
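 *
 * Consecutive zones that need a reset are merged so that a single
 * zbd_reset_range() call covers the whole run, keeping the ioctl count down.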
 */
static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
			   struct fio_zone_info *const zb,
			   struct fio_zone_info *const ze, bool all_zones)
{
	struct fio_zone_info *z, *start_z = ze;
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE] >> 9;
	bool reset_wp;
	int res = 0;

	dprint(FD_ZBD, "%s: examining zones %lu .. %lu\n", f->file_name,
	       zb - f->zbd_info->zone_info, ze - f->zbd_info->zone_info);
	assert(f->fd != -1);
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		switch (z->type) {
		case BLK_ZONE_TYPE_SEQWRITE_REQ:
			reset_wp = all_zones ? z->wp != z->start :
					(td->o.td_ddir & TD_DDIR_WRITE) &&
					z->wp % min_bs != 0;
			if (start_z == ze && reset_wp) {
				start_z = z;
			} else if (start_z < ze && !reset_wp) {
				dprint(FD_ZBD,
				       "%s: resetting zones %lu .. %lu\n",
				       f->file_name,
				       start_z - f->zbd_info->zone_info,
				       z - f->zbd_info->zone_info);
				if (zbd_reset_range(td, f, start_z->start,
						z->start - start_z->start) < 0)
					res = 1;
				start_z = ze;
			}
			break;
		default:
			if (start_z == ze)
				break;
			dprint(FD_ZBD, "%s: resetting zones %lu .. %lu\n",
			       f->file_name, start_z - f->zbd_info->zone_info,
			       z - f->zbd_info->zone_info);
			if (zbd_reset_range(td, f, start_z->start,
					    z->start - start_z->start) < 0)
				res = 1;
			start_z = ze;
			break;
		}
	}
	if (start_z < ze) {
		dprint(FD_ZBD, "%s: resetting zones %lu .. %lu\n", f->file_name,
		       start_z - f->zbd_info->zone_info,
		       z - f->zbd_info->zone_info);
		if (zbd_reset_range(td, f, start_z->start,
				    z->start - start_z->start) < 0)
			res = 1;
	}
	for (z = zb; z < ze; z++)
		pthread_mutex_unlock(&z->mutex);

	return res;
}

/*
 * Reset zbd_info.write_cnt, the counter that counts down towards the next
 * zone reset.
 */
static void zbd_reset_write_cnt(const struct thread_data *td,
				const struct fio_file *f)
{
	assert(0 <= td->o.zrf.u.f && td->o.zrf.u.f <= 1);

	pthread_mutex_lock(&f->zbd_info->mutex);
	f->zbd_info->write_cnt = td->o.zrf.u.f ?
		min(1.0 / td->o.zrf.u.f, 0.0 + UINT_MAX) : UINT_MAX;
	pthread_mutex_unlock(&f->zbd_info->mutex);
}

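/*
 * Illustration (example value, not a default): zone_reset_frequency=0.01
 * makes write_cnt start at 1.0 / 0.01 = 100, so the countdown below fires
 * once every 100 write requests.
 */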
static bool zbd_dec_and_reset_write_cnt(const struct thread_data *td,
					const struct fio_file *f)
{
	uint32_t write_cnt = 0;

	pthread_mutex_lock(&f->zbd_info->mutex);
	assert(f->zbd_info->write_cnt);
	if (f->zbd_info->write_cnt)
		write_cnt = --f->zbd_info->write_cnt;
	if (write_cnt == 0)
		zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);

	return write_cnt == 0;
}

/* Check whether the value of zbd_info.sectors_with_data is correct. */
static void check_swd(const struct thread_data *td, const struct fio_file *f)
{
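	/* Debug-only consistency check; enable by changing "#if 0" into "#if 1". */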
#if 0
	struct fio_zone_info *zb, *ze, *z;
	uint64_t swd;

	zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
	ze = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset +
						  f->io_size)];
	swd = 0;
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		swd += z->wp - z->start;
	}
	pthread_mutex_lock(&f->zbd_info->mutex);
	assert(f->zbd_info->sectors_with_data == swd);
	pthread_mutex_unlock(&f->zbd_info->mutex);
	for (z = zb; z < ze; z++)
		pthread_mutex_unlock(&z->mutex);
#endif
}

void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
	struct fio_zone_info *zb, *ze, *z;
	uint32_t zone_idx_e;
	uint64_t swd = 0;

	if (!f->zbd_info)
		return;

	zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
	zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size);
	ze = &f->zbd_info->zone_info[zone_idx_e];
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		swd += z->wp - z->start;
	}
	pthread_mutex_lock(&f->zbd_info->mutex);
	f->zbd_info->sectors_with_data = swd;
	pthread_mutex_unlock(&f->zbd_info->mutex);
	for (z = zb; z < ze; z++)
		pthread_mutex_unlock(&z->mutex);
	dprint(FD_ZBD, "%s(%s): swd = %ld\n", __func__, f->file_name, swd);
	/*
	 * If data verification is enabled, reset the affected zones before
	 * writing any data. This avoids having to issue a zone reset while
	 * data is being written, which would cause data loss.
	 */
	zbd_reset_zones(td, f, zb, ze, td->o.verify != VERIFY_NONE &&
			(td->o.td_ddir & TD_DDIR_WRITE) &&
			td->runstate != TD_VERIFYING);
	zbd_reset_write_cnt(td, f);
}

/* The caller must hold f->zbd_info->mutex. */
static bool is_zone_open(const struct thread_data *td, const struct fio_file *f,
			 unsigned int zone_idx)
{
	struct zoned_block_device_info *zbdi = f->zbd_info;
	int i;

	assert(td->o.max_open_zones <= ARRAY_SIZE(zbdi->open_zones));
	assert(zbdi->num_open_zones <= td->o.max_open_zones);

	for (i = 0; i < zbdi->num_open_zones; i++)
		if (zbdi->open_zones[i] == zone_idx)
			return true;

	return false;
}

/*
 * Open a ZBD zone if it was not yet open. Returns true if either the zone was
 * already open or if opening a new zone is allowed. Returns false if the zone
 * was not yet open and opening a new zone would cause the zone limit to be
 * exceeded.
 */
static bool zbd_open_zone(struct thread_data *td, const struct io_u *io_u,
			  uint32_t zone_idx)
{
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z = &f->zbd_info->zone_info[zone_idx];
	bool res = true;

	if (z->cond == BLK_ZONE_COND_OFFLINE)
		return false;

	/*
	 * Skip full zones with data verification enabled because resetting a
	 * zone causes data loss and hence causes verification to fail.
	 */
	if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
		return false;

	/* Zero means no limit */
	if (!td->o.max_open_zones)
		return true;

	pthread_mutex_lock(&f->zbd_info->mutex);
	if (is_zone_open(td, f, zone_idx))
		goto out;
	res = false;
	if (f->zbd_info->num_open_zones >= td->o.max_open_zones)
		goto out;
	dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
	f->zbd_info->open_zones[f->zbd_info->num_open_zones++] = zone_idx;
	z->open = 1;
	res = true;

out:
	pthread_mutex_unlock(&f->zbd_info->mutex);
	return res;
}

/* The caller must hold f->zbd_info->mutex */
static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
			   unsigned int open_zone_idx)
{
	uint32_t zone_idx;

	assert(open_zone_idx < f->zbd_info->num_open_zones);
	zone_idx = f->zbd_info->open_zones[open_zone_idx];
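	/* Compact the open-zone list by shifting the tail down one slot. */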
	memmove(f->zbd_info->open_zones + open_zone_idx,
		f->zbd_info->open_zones + open_zone_idx + 1,
		(FIO_MAX_OPEN_ZBD_ZONES - (open_zone_idx + 1)) *
		sizeof(f->zbd_info->open_zones[0]));
	f->zbd_info->num_open_zones--;
	f->zbd_info->zone_info[zone_idx].open = 0;
}

/*
 * Modify the offset of an I/O unit that does not refer to an open zone such
 * that it refers to an open zone. Close an open zone and open a new zone if
 * necessary. This algorithm can only work correctly if all write pointers are
 * a multiple of the fio block size. The caller must neither hold z->mutex
 * nor f->zbd_info->mutex. Returns with z->mutex held upon success.
 */
struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
					       struct io_u *io_u)
{
	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z;
	unsigned int open_zone_idx = -1;
	uint32_t zone_idx, new_zone_idx;
	int i;

	assert(is_valid_offset(f, io_u->offset));

	if (td->o.max_open_zones) {
		/*
		 * This statement accesses f->zbd_info->open_zones[] on purpose
		 * without locking. It maps the I/O offset proportionally onto
		 * the table of open zones so that the generated offsets keep
		 * their original distribution.
		 */
		zone_idx = f->zbd_info->open_zones[(io_u->offset -
			f->file_offset) * f->zbd_info->num_open_zones /
			f->io_size];
	} else {
		zone_idx = zbd_zone_idx(f, io_u->offset);
	}
	dprint(FD_ZBD, "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
	       __func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);

	/*
	 * Since z->mutex is the outer lock and f->zbd_info->mutex the inner
	 * lock it can happen that the state of the zone with index zone_idx
	 * has changed after 'z' has been assigned and before f->zbd_info->mutex
	 * has been obtained. Hence the loop.
	 */
	for (;;) {
		z = &f->zbd_info->zone_info[zone_idx];

		pthread_mutex_lock(&z->mutex);
		pthread_mutex_lock(&f->zbd_info->mutex);
		if (td->o.max_open_zones == 0)
			goto examine_zone;
		if (f->zbd_info->num_open_zones == 0) {
			pthread_mutex_unlock(&f->zbd_info->mutex);
			pthread_mutex_unlock(&z->mutex);
			dprint(FD_ZBD, "%s(%s): no zones are open\n",
			       __func__, f->file_name);
			return NULL;
		}
		open_zone_idx = (io_u->offset - f->file_offset) *
			f->zbd_info->num_open_zones / f->io_size;
		assert(open_zone_idx < f->zbd_info->num_open_zones);
		new_zone_idx = f->zbd_info->open_zones[open_zone_idx];
		if (new_zone_idx == zone_idx)
			break;
		zone_idx = new_zone_idx;
		pthread_mutex_unlock(&f->zbd_info->mutex);
		pthread_mutex_unlock(&z->mutex);
	}

	/* Both z->mutex and f->zbd_info->mutex are held. */

examine_zone:
	if ((z->wp << 9) + min_bs <= ((z+1)->start << 9)) {
		pthread_mutex_unlock(&f->zbd_info->mutex);
		goto out;
	}
	dprint(FD_ZBD, "%s(%s): closing zone %d\n", __func__, f->file_name,
	       zone_idx);
	if (td->o.max_open_zones)
		zbd_close_zone(td, f, open_zone_idx);
	pthread_mutex_unlock(&f->zbd_info->mutex);

	/* Only z->mutex is held. */

	/* Zone 'z' is full, so try to open a new zone. */
	for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
		zone_idx++;
		pthread_mutex_unlock(&z->mutex);
		z++;
		if (!is_valid_offset(f, z->start << 9)) {
			/* Wrap-around. */
			zone_idx = zbd_zone_idx(f, f->file_offset);
			z = &f->zbd_info->zone_info[zone_idx];
		}
		assert(is_valid_offset(f, z->start << 9));
		pthread_mutex_lock(&z->mutex);
		if (z->open)
			continue;
		if (zbd_open_zone(td, io_u, zone_idx))
			goto out;
	}

	/* Only z->mutex is held. */

	/* Check whether the write fits in any of the already opened zones. */
	pthread_mutex_lock(&f->zbd_info->mutex);
	for (i = 0; i < f->zbd_info->num_open_zones; i++) {
		zone_idx = f->zbd_info->open_zones[i];
		pthread_mutex_unlock(&f->zbd_info->mutex);
		pthread_mutex_unlock(&z->mutex);

		z = &f->zbd_info->zone_info[zone_idx];

		pthread_mutex_lock(&z->mutex);
		if ((z->wp << 9) + min_bs <= ((z+1)->start << 9))
			goto out;
		pthread_mutex_lock(&f->zbd_info->mutex);
	}
	pthread_mutex_unlock(&f->zbd_info->mutex);
	pthread_mutex_unlock(&z->mutex);
	dprint(FD_ZBD, "%s(%s): did not open another zone\n", __func__,
	       f->file_name);
	return NULL;

out:
	dprint(FD_ZBD, "%s(%s): returning zone %d\n", __func__, f->file_name,
	       zone_idx);
	io_u->offset = z->start << 9;
	return z;
}

/* The caller must hold z->mutex. */
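/*
 * During verification, reads replay the original write order: each call
 * returns the offset of the next min_bs-sized block in the zone, tracked
 * via z->verify_block.
 */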
static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
						    struct io_u *io_u,
						    struct fio_zone_info *z)
{
	const struct fio_file *f = io_u->file;
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];

	if (!zbd_open_zone(td, io_u, z - f->zbd_info->zone_info)) {
		pthread_mutex_unlock(&z->mutex);
		z = zbd_convert_to_open_zone(td, io_u);
		assert(z);
	}

	if (z->verify_block * min_bs >= (f->zbd_info->zone_size << 9))
		log_err("%s: %d * %d >= %ld\n", f->file_name, z->verify_block,
			min_bs, f->zbd_info->zone_size << 9);
	io_u->offset = (z->start << 9) + z->verify_block++ * min_bs;
	return z;
}

/*
 * Find another zone for which @io_u fits below the write pointer. Start
 * searching in zones @zb + 1 .. @zl and continue searching in zones
 * @zf .. @zb - 1.
 *
 * Either returns NULL or returns a zone pointer and holds the mutex for that
 * zone.
 */
static struct fio_zone_info *
zbd_find_zone(struct thread_data *td, struct io_u *io_u,
	      struct fio_zone_info *zb, struct fio_zone_info *zl)
{
	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z1, *z2;
	const struct fio_zone_info *const zf =
		&f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];

	/*
	 * Skip to the next non-empty zone in case of sequential I/O and to
	 * the nearest non-empty zone in case of random I/O.
	 */
	for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
		if (z1 < zl && z1->cond != BLK_ZONE_COND_OFFLINE) {
			pthread_mutex_lock(&z1->mutex);
			if (z1->start + (min_bs >> 9) <= z1->wp)
				return z1;
			pthread_mutex_unlock(&z1->mutex);
		} else if (!td_random(td)) {
			break;
		}
		if (td_random(td) && z2 >= zf &&
		    z2->cond != BLK_ZONE_COND_OFFLINE) {
			pthread_mutex_lock(&z2->mutex);
			if (z2->start + (min_bs >> 9) <= z2->wp)
				return z2;
			pthread_mutex_unlock(&z2->mutex);
		}
	}
	dprint(FD_ZBD, "%s: adjusting random read offset failed\n",
	       f->file_name);
	return NULL;
}

/**
 * zbd_post_submit - update the write pointer and unlock the zone lock
 * @io_u: I/O unit
 * @success: Whether or not the I/O unit has been executed successfully
 *
 * For write and trim operations, update the write pointer of all affected
 * zones.
 */
static void zbd_post_submit(const struct io_u *io_u, bool success)
{
	struct zoned_block_device_info *zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx;
	uint64_t end, zone_end;

	zbd_info = io_u->file->zbd_info;
	if (!zbd_info)
		return;

	zone_idx = zbd_zone_idx(io_u->file, io_u->offset);
	end = (io_u->offset + io_u->buflen) >> 9;
	z = &zbd_info->zone_info[zone_idx];
	assert(zone_idx < zbd_info->nr_zones);
	if (z->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return;
	if (!success)
		goto unlock;
	switch (io_u->ddir) {
	case DDIR_WRITE:
		zone_end = min(end, (z + 1)->start);
		pthread_mutex_lock(&zbd_info->mutex);
		/*
		 * z->wp > zone_end means that one or more I/O errors
		 * have occurred.
		 */
		if (z->wp <= zone_end)
			zbd_info->sectors_with_data += zone_end - z->wp;
		pthread_mutex_unlock(&zbd_info->mutex);
		z->wp = zone_end;
		break;
	case DDIR_TRIM:
		assert(z->wp == z->start);
		break;
	default:
		break;
	}
unlock:
	pthread_mutex_unlock(&z->mutex);
}
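/*
 * On Linux, a write that does not start at the write pointer of a
 * sequential zone typically fails with EIO or, for SCSI host-managed
 * drives, with EREMOTEIO (the errno used for critical target errors).
 */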
bool zbd_unaligned_write(int error_code)
{
	switch (error_code) {
	case EIO:
	case EREMOTEIO:
		return true;
	}
	return false;
}

/**
 * zbd_adjust_block - adjust the offset and length as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * Locking strategy: returns with z->mutex locked if and only if z refers
 * to a sequential zone and if io_u_accept is returned. z is the zone that
 * corresponds to io_u->offset at the end of this function.
 */
enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
{
	const struct fio_file *f = io_u->file;
	uint32_t zone_idx_b;
	struct fio_zone_info *zb, *zl;
	uint32_t orig_len = io_u->buflen;
	uint32_t min_bs = td->o.min_bs[io_u->ddir];
	uint64_t new_len;
	int64_t range;

	if (!f->zbd_info)
		return io_u_accept;

	assert(is_valid_offset(f, io_u->offset));
	assert(io_u->buflen);
	zone_idx_b = zbd_zone_idx(f, io_u->offset);
	zb = &f->zbd_info->zone_info[zone_idx_b];

	/* Accept the I/O offset for conventional zones. */
	if (zb->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return io_u_accept;

	/*
	 * Accept the I/O offset for reads if reading beyond the write pointer
	 * is enabled.
	 */
	if (zb->cond != BLK_ZONE_COND_OFFLINE &&
	    io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
		return io_u_accept;

	pthread_mutex_lock(&zb->mutex);
	switch (io_u->ddir) {
	case DDIR_READ:
		if (td->runstate == TD_VERIFYING) {
			zb = zbd_replay_write_order(td, io_u, zb);
			goto accept;
		}
		/*
		 * Avoid reads past the write pointer because such reads do not
		 * hit the medium.
		 */
		range = zb->cond != BLK_ZONE_COND_OFFLINE ?
			((zb->wp - zb->start) << 9) - io_u->buflen : 0;
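		/*
		 * For random reads, map the offset into [start, wp - buflen]
		 * and align it down to min_bs so the read never crosses the
		 * write pointer.
		 */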
		if (td_random(td) && range >= 0) {
			io_u->offset = (zb->start << 9) +
				((io_u->offset - (zb->start << 9)) %
				 (range + 1)) / min_bs * min_bs;
			assert(zb->start << 9 <= io_u->offset);
			assert(io_u->offset + io_u->buflen <= zb->wp << 9);
			goto accept;
		}
		if (zb->cond == BLK_ZONE_COND_OFFLINE ||
		    (io_u->offset + io_u->buflen) >> 9 > zb->wp) {
			pthread_mutex_unlock(&zb->mutex);
			zl = &f->zbd_info->zone_info[zbd_zone_idx(f,
						f->file_offset + f->io_size)];
			zb = zbd_find_zone(td, io_u, zb, zl);
			if (!zb) {
				dprint(FD_ZBD,
				       "%s: zbd_find_zone(%lld, %llu) failed\n",
				       f->file_name, io_u->offset,
				       io_u->buflen);
				goto eof;
			}
			io_u->offset = zb->start << 9;
		}
		if ((io_u->offset + io_u->buflen) >> 9 > zb->wp) {
			dprint(FD_ZBD, "%s: %lld + %lld > %" PRIu64 "\n",
			       f->file_name, io_u->offset, io_u->buflen,
			       zb->wp);
			goto eof;
		}
		goto accept;
	case DDIR_WRITE:
		if (io_u->buflen > (f->zbd_info->zone_size << 9))
			goto eof;
		if (!zbd_open_zone(td, io_u, zone_idx_b)) {
			pthread_mutex_unlock(&zb->mutex);
			zb = zbd_convert_to_open_zone(td, io_u);
			if (!zb)
				goto eof;
			zone_idx_b = zb - f->zbd_info->zone_info;
		}
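		/*
		 * zrt is the zone_reset_threshold fraction: once the share of
		 * sectors with data reaches it, the write_cnt countdown
		 * (derived from zone_reset_frequency) decides when the next
		 * reset actually happens.
		 */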
		/* Check whether the zone reset threshold has been exceeded */
		if (td->o.zrf.u.f) {
			check_swd(td, f);
			if ((f->zbd_info->sectors_with_data << 9) >=
			    f->io_size * td->o.zrt.u.f &&
			    zbd_dec_and_reset_write_cnt(td, f)) {
				zb->reset_zone = 1;
			}
		}
		/* Reset the zone pointer if necessary */
		if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
			assert(td->o.verify == VERIFY_NONE);
			/*
			 * Since previous write requests may have been submitted
			 * asynchronously and since we will submit the zone
			 * reset synchronously, wait until previously submitted
			 * write requests have completed before issuing a
			 * zone reset.
			 */
			io_u_quiesce(td);
			zb->reset_zone = 0;
			if (zbd_reset_zone(td, f, zb) < 0)
				goto eof;
			check_swd(td, f);
		}
		/* Make writes occur at the write pointer */
		assert(!zbd_zone_full(f, zb, min_bs));
		io_u->offset = zb->wp << 9;
		if (!is_valid_offset(f, io_u->offset)) {
			dprint(FD_ZBD, "Dropped request with offset %llu\n",
			       io_u->offset);
			goto eof;
		}
		/*
		 * Make sure that the buflen is a multiple of the minimal
		 * block size. Give up if shrinking would make the request too
		 * small.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      ((zb + 1)->start << 9) - io_u->offset);
		new_len = new_len / min_bs * min_bs;
		if (new_len == io_u->buflen)
			goto accept;
		if (new_len >= min_bs) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u into %llu\n",
			       orig_len, io_u->buflen);
			goto accept;
		}
		log_err("Zone remainder %lld smaller than minimum block size %d\n",
			(((zb + 1)->start << 9) - io_u->offset),
			min_bs);
		goto eof;
	case DDIR_TRIM:
		/* fall-through */
	case DDIR_SYNC:
	case DDIR_DATASYNC:
	case DDIR_SYNC_FILE_RANGE:
	case DDIR_WAIT:
	case DDIR_LAST:
	case DDIR_INVAL:
		goto accept;
	}

	assert(false);

accept:
	assert(zb);
	assert(zb->cond != BLK_ZONE_COND_OFFLINE);
	assert(!io_u->post_submit);
	io_u->post_submit = zbd_post_submit;
	return io_u_accept;

eof:
	if (zb)
		pthread_mutex_unlock(&zb->mutex);
	return io_u_eof;
}

/* Return a string with ZBD statistics */
char *zbd_write_status(const struct thread_stat *ts)
{
	char *res;

	if (asprintf(&res, "; %ld zone resets", ts->nr_zone_resets) < 0)
		return NULL;
	return res;
}