/*
 * Copyright (C) 2018 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

#include "os/os.h"
#include "file.h"
#include "fio.h"
#include "lib/pow2.h"
#include "log.h"
#include "oslib/asprintf.h"
#include "smalloc.h"
#include "verify.h"
#include "zbd.h"

/**
 * zbd_get_zoned_model - Get a device zoned model
 * @td: FIO thread data
 * @f: FIO file for which to get model information
 * @model: storage for the determined zoned model
 */
int zbd_get_zoned_model(struct thread_data *td, struct fio_file *f,
			enum zbd_zoned_model *model)
{
	int ret;

	if (td->io_ops && td->io_ops->get_zoned_model)
		ret = td->io_ops->get_zoned_model(td, f, model);
	else
		ret = blkzoned_get_zoned_model(td, f, model);
	if (ret < 0) {
		td_verror(td, errno, "get zoned model failed");
		log_err("%s: get zoned model failed (%d).\n",
			f->file_name, errno);
	}

	return ret;
}

/**
 * zbd_report_zones - Get zone information
 * @td: FIO thread data.
 * @f: FIO file for which to get zone information
 * @offset: offset from which to report zones
 * @zones: Array of struct zbd_zone
 * @nr_zones: Size of @zones array
 *
 * Get zone information into @zones starting from the zone at offset @offset
 * for the device specified by @f.
 *
 * Returns the number of zones reported upon success and a negative error code
 * upon failure. If the zone report is empty, always assume an error (device
 * problem) and return -EIO.
 */
int zbd_report_zones(struct thread_data *td, struct fio_file *f,
		     uint64_t offset, struct zbd_zone *zones,
		     unsigned int nr_zones)
{
	int ret;

	if (td->io_ops && td->io_ops->report_zones)
		ret = td->io_ops->report_zones(td, f, offset, zones, nr_zones);
	else
		ret = blkzoned_report_zones(td, f, offset, zones, nr_zones);
	if (ret < 0) {
		td_verror(td, errno, "report zones failed");
		log_err("%s: report zones from sector %llu failed (%d).\n",
			f->file_name, (unsigned long long)offset >> 9, errno);
	} else if (ret == 0) {
		td_verror(td, errno, "Empty zone report");
		log_err("%s: report zones from sector %llu is empty.\n",
			f->file_name, (unsigned long long)offset >> 9);
		ret = -EIO;
	}

	return ret;
}
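
/*
 * Illustrative sketch (not part of the fio API): a caller is expected to
 * iterate until all zones have been reported, advancing the offset past the
 * last reported zone, e.g.:
 *
 *	struct zbd_zone zones[128];
 *	uint64_t ofs = 0;
 *	int nrz;
 *
 *	while ((nrz = zbd_report_zones(td, f, ofs, zones, 128)) > 0) {
 *		ofs = zones[nrz - 1].start + zones[nrz - 1].len;
 *		if (ofs >= f->real_file_size)
 *			break;
 *	}
 *
 * parse_zone_info() below implements this pattern with full error handling.
 */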

/**
 * zbd_reset_wp - reset the write pointer of a range of zones
 * @td: FIO thread data.
 * @f: FIO file for which to reset zones
 * @offset: Starting offset of the first zone to reset
 * @length: Length of the range of zones to reset
 *
 * Reset the write pointer of all zones in the range @offset...@offset+@length.
 * Returns 0 upon success and a negative error code upon failure.
 */
int zbd_reset_wp(struct thread_data *td, struct fio_file *f,
		 uint64_t offset, uint64_t length)
{
	int ret;

	if (td->io_ops && td->io_ops->reset_wp)
		ret = td->io_ops->reset_wp(td, f, offset, length);
	else
		ret = blkzoned_reset_wp(td, f, offset, length);
	if (ret < 0) {
		td_verror(td, errno, "resetting wp failed");
		log_err("%s: resetting wp for %llu sectors at sector %llu failed (%d).\n",
			f->file_name, (unsigned long long)length >> 9,
			(unsigned long long)offset >> 9, errno);
	}

	return ret;
}

/**
 * zbd_zone_idx - convert an offset into a zone number
 * @f: file pointer.
 * @offset: offset in bytes. If this offset is in the first zone_size bytes
 *	    past the disk size then the index of the sentinel is returned.
 */
static uint32_t zbd_zone_idx(const struct fio_file *f, uint64_t offset)
{
	uint32_t zone_idx;

	if (f->zbd_info->zone_size_log2 > 0)
		zone_idx = offset >> f->zbd_info->zone_size_log2;
	else
		zone_idx = offset / f->zbd_info->zone_size;

	return min(zone_idx, f->zbd_info->nr_zones);
}
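
/*
 * Worked example (hypothetical numbers): with a power-of-two zone size of
 * 256 MiB, zone_size_log2 is 28, so an offset of 1 GiB maps to zone index
 * (1 << 30) >> 28 == 4. Zone sizes that are not a power of two fall back to
 * the division path above.
 */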

/**
 * zbd_zone_swr - Test whether a zone requires sequential writes
 * @z: zone info pointer.
 */
static inline bool zbd_zone_swr(struct fio_zone_info *z)
{
	return z->type == ZBD_ZONE_TYPE_SWR;
}

/**
 * zbd_zone_full - verify whether a minimum number of bytes remain in a zone
 * @f: file pointer.
 * @z: zone info pointer.
 * @required: minimum number of bytes that must remain in a zone.
 *
 * The caller must hold z->mutex.
 */
static bool zbd_zone_full(const struct fio_file *f, struct fio_zone_info *z,
			  uint64_t required)
{
	assert((required & 511) == 0);

	return zbd_zone_swr(z) &&
		z->wp + required > z->start + f->zbd_info->zone_size;
}

static void zone_lock(struct thread_data *td, struct fio_zone_info *z)
{
	/*
	 * Lock the io_u target zone. The zone will be unlocked if io_u offset
	 * is changed or when io_u completes and zbd_put_io() executed.
	 * To avoid multiple jobs doing asynchronous I/Os from deadlocking each
	 * other waiting for zone locks when building an io_u batch, first
	 * only trylock the zone. If the zone is already locked by another job,
	 * process the currently queued I/Os so that I/O progress is made and
	 * zones unlocked.
	 */
	if (pthread_mutex_trylock(&z->mutex) != 0) {
		if (!td_ioengine_flagged(td, FIO_SYNCIO))
			io_u_quiesce(td);
		pthread_mutex_lock(&z->mutex);
	}
}

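/*
 * Lock ordering note (summarizing the comments in this file): z->mutex is
 * the outer lock and f->zbd_info->mutex the inner lock, taken only for short
 * critical sections. A zone lock acquired here is released either when the
 * io_u is redirected to another zone or from zbd_queue_io()/zbd_put_io()
 * once the I/O is done.
 */
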
static bool is_valid_offset(const struct fio_file *f, uint64_t offset)
{
	return (uint64_t)(offset - f->file_offset) < f->io_size;
}

/* Verify whether direct I/O is used for all host-managed zoned drives. */
static bool zbd_using_direct_io(void)
{
	struct thread_data *td;
	struct fio_file *f;
	int i, j;

	for_each_td(td, i) {
		if (td->o.odirect || !(td->o.td_ddir & TD_DDIR_WRITE))
			continue;
		for_each_file(td, f, j) {
			if (f->zbd_info &&
			    f->zbd_info->model == ZBD_HOST_MANAGED)
				return false;
		}
	}

	return true;
}

/* Whether or not the I/O range for f includes one or more sequential zones */
static bool zbd_is_seq_job(struct fio_file *f)
{
	uint32_t zone_idx, zone_idx_b, zone_idx_e;

	assert(f->zbd_info);
	if (f->io_size == 0)
		return false;
	zone_idx_b = zbd_zone_idx(f, f->file_offset);
	zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size - 1);
	for (zone_idx = zone_idx_b; zone_idx <= zone_idx_e; zone_idx++)
		if (zbd_zone_swr(&f->zbd_info->zone_info[zone_idx]))
			return true;

	return false;
}

/*
 * Verify whether offset and size parameters are aligned with zone boundaries.
 */
static bool zbd_verify_sizes(void)
{
	const struct fio_zone_info *z;
	struct thread_data *td;
	struct fio_file *f;
	uint64_t new_offset, new_end;
	uint32_t zone_idx;
	int i, j;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			if (!f->zbd_info)
				continue;
			if (f->file_offset >= f->real_file_size)
				continue;
			if (!zbd_is_seq_job(f))
				continue;

			if (!td->o.zone_size) {
				td->o.zone_size = f->zbd_info->zone_size;
				if (!td->o.zone_size) {
					log_err("%s: invalid 0 zone size\n",
						f->file_name);
					return false;
				}
			} else if (td->o.zone_size != f->zbd_info->zone_size) {
				log_err("%s: job parameter zonesize %llu does not match disk zone size %llu.\n",
					f->file_name, (unsigned long long) td->o.zone_size,
					(unsigned long long) f->zbd_info->zone_size);
				return false;
			}

			if (td->o.zone_skip &&
			    (td->o.zone_skip < td->o.zone_size ||
			     td->o.zone_skip % td->o.zone_size)) {
				log_err("%s: zoneskip %llu is not a multiple of the device zone size %llu.\n",
					f->file_name, (unsigned long long) td->o.zone_skip,
					(unsigned long long) td->o.zone_size);
				return false;
			}

			zone_idx = zbd_zone_idx(f, f->file_offset);
			z = &f->zbd_info->zone_info[zone_idx];
			if ((f->file_offset != z->start) &&
			    (td->o.td_ddir != TD_DDIR_READ)) {
				new_offset = (z+1)->start;
				if (new_offset >= f->file_offset + f->io_size) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded up offset from %llu to %llu\n",
					 f->file_name, (unsigned long long) f->file_offset,
					 (unsigned long long) new_offset);
				f->io_size -= (new_offset - f->file_offset);
				f->file_offset = new_offset;
			}
			zone_idx = zbd_zone_idx(f, f->file_offset + f->io_size);
			z = &f->zbd_info->zone_info[zone_idx];
			new_end = z->start;
			if ((td->o.td_ddir != TD_DDIR_READ) &&
			    (f->file_offset + f->io_size != new_end)) {
				if (new_end <= f->file_offset) {
					log_info("%s: io_size must be at least one zone\n",
						 f->file_name);
					return false;
				}
				log_info("%s: rounded down io_size from %llu to %llu\n",
					 f->file_name, (unsigned long long) f->io_size,
					 (unsigned long long) new_end - f->file_offset);
				f->io_size = new_end - f->file_offset;
			}
		}
	}

	return true;
}

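/*
 * Rounding example (hypothetical numbers): with a 256 MiB zone size, a write
 * job with offset=100M and size=900M first has its offset rounded up to the
 * next zone start (256 MiB, shrinking io_size to 744 MiB), and then its end
 * (1000 MiB) rounded down to the last zone boundary at 768 MiB, leaving an
 * io_size of 512 MiB.
 */
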
static bool zbd_verify_bs(void)
{
	struct thread_data *td;
	struct fio_file *f;
	uint32_t zone_size;
	int i, j, k;

	for_each_td(td, i) {
		for_each_file(td, f, j) {
			if (!f->zbd_info)
				continue;
			zone_size = f->zbd_info->zone_size;
			for (k = 0; k < ARRAY_SIZE(td->o.bs); k++) {
				if (td->o.verify != VERIFY_NONE &&
				    zone_size % td->o.bs[k] != 0) {
					log_info("%s: block size %llu is not a divisor of the zone size %u\n",
						 f->file_name, td->o.bs[k],
						 zone_size);
					return false;
				}
			}
		}
	}
	return true;
}
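
/*
 * Example of the divisor check (hypothetical numbers): with a 256 MiB zone
 * and verify enabled, bs=128k passes (256 MiB is a multiple of 128 KiB)
 * while bs=192k fails, since a verify pass must be able to read back whole
 * blocks without crossing a zone boundary.
 */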

static int ilog2(uint64_t i)
{
	int log = -1;

	while (i) {
		i >>= 1;
		log++;
	}
	return log;
}

/*
 * Initialize f->zbd_info for devices that are not zoned block devices. This
 * allows to execute a ZBD workload against a non-ZBD device.
 */
static int init_zone_info(struct thread_data *td, struct fio_file *f)
{
	uint32_t nr_zones;
	struct fio_zone_info *p;
	uint64_t zone_size = td->o.zone_size;
	struct zoned_block_device_info *zbd_info = NULL;
	pthread_mutexattr_t attr;
	int i;

	if (zone_size == 0) {
		log_err("%s: Specifying the zone size is mandatory for regular block devices with --zonemode=zbd\n\n",
			f->file_name);
		return 1;
	}

	if (zone_size < 512) {
		log_err("%s: zone size must be at least 512 bytes for --zonemode=zbd\n\n",
			f->file_name);
		return 1;
	}

	nr_zones = (f->real_file_size + zone_size - 1) / zone_size;
	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	if (!zbd_info)
		return -ENOMEM;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setpshared(&attr, true);
	pthread_mutex_init(&zbd_info->mutex, &attr);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (i = 0; i < nr_zones; i++, p++) {
		pthread_mutex_init(&p->mutex, &attr);
		p->start = i * zone_size;
		p->wp = p->start + zone_size;
		p->type = ZBD_ZONE_TYPE_SWR;
		p->cond = ZBD_ZONE_COND_EMPTY;
	}
	/* a sentinel */
	p->start = nr_zones * zone_size;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) : 0;
	f->zbd_info->nr_zones = nr_zones;
	pthread_mutexattr_destroy(&attr);
	return 0;
}
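
/*
 * Sizing example for the above (hypothetical numbers): a 1 GiB regular file
 * with --zonemode=zbd and zonesize=64M gets nr_zones = 16 emulated
 * sequential zones, plus one sentinel entry whose start marks the end of the
 * device. Note that wp is initialized to start + zone_size, so
 * zbd_zone_full() treats these zones as full until they are reset.
 */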

/*
 * Maximum number of zones to report in one operation.
 */
#define ZBD_REPORT_MAX_ZONES	8192U

/*
 * Parse the device zone report and store it in f->zbd_info. Must be called
 * only for devices that are zoned, namely those with a model != ZBD_NONE.
 */
static int parse_zone_info(struct thread_data *td, struct fio_file *f)
{
	int nr_zones, nrz;
	struct zbd_zone *zones, *z;
	struct fio_zone_info *p;
	uint64_t zone_size, offset;
	struct zoned_block_device_info *zbd_info = NULL;
	pthread_mutexattr_t attr;
	int i, j, ret = 0;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_setpshared(&attr, true);

	zones = calloc(ZBD_REPORT_MAX_ZONES, sizeof(struct zbd_zone));
	if (!zones) {
		ret = -ENOMEM;
		goto out;
	}

	nrz = zbd_report_zones(td, f, 0, zones, ZBD_REPORT_MAX_ZONES);
	if (nrz < 0) {
		ret = nrz;
		log_info("fio: report zones (offset 0) failed for %s (%d).\n",
			 f->file_name, -ret);
		goto out;
	}

	zone_size = zones[0].len;
	nr_zones = (f->real_file_size + zone_size - 1) / zone_size;

	if (td->o.zone_size == 0) {
		td->o.zone_size = zone_size;
	} else if (td->o.zone_size != zone_size) {
		log_err("fio: %s job parameter zonesize %llu does not match disk zone size %llu.\n",
			f->file_name, (unsigned long long) td->o.zone_size,
			(unsigned long long) zone_size);
		ret = -EINVAL;
		goto out;
	}

	dprint(FD_ZBD, "Device %s has %d zones of size %llu KB\n", f->file_name,
	       nr_zones, (unsigned long long) zone_size / 1024);

	zbd_info = scalloc(1, sizeof(*zbd_info) +
			   (nr_zones + 1) * sizeof(zbd_info->zone_info[0]));
	ret = -ENOMEM;
	if (!zbd_info)
		goto out;
	pthread_mutex_init(&zbd_info->mutex, &attr);
	zbd_info->refcount = 1;
	p = &zbd_info->zone_info[0];
	for (offset = 0, j = 0; j < nr_zones;) {
		z = &zones[0];
		for (i = 0; i < nrz; i++, j++, z++, p++) {
			pthread_mutex_init(&p->mutex, &attr);
			p->start = z->start;
			switch (z->cond) {
			case ZBD_ZONE_COND_NOT_WP:
			case ZBD_ZONE_COND_FULL:
				p->wp = p->start + zone_size;
				break;
			default:
				assert(z->start <= z->wp);
				assert(z->wp <= z->start + zone_size);
				p->wp = z->wp;
				break;
			}
			p->type = z->type;
			p->cond = z->cond;
			if (j > 0 && p->start != p[-1].start + zone_size) {
				log_info("%s: invalid zone data\n",
					 f->file_name);
				ret = -EINVAL;
				goto out;
			}
		}
		z--;
		offset = z->start + z->len;
		if (j >= nr_zones)
			break;
		nrz = zbd_report_zones(td, f, offset,
				       zones, ZBD_REPORT_MAX_ZONES);
		if (nrz < 0) {
			ret = nrz;
			log_info("fio: report zones (offset %llu) failed for %s (%d).\n",
				 (unsigned long long)offset,
				 f->file_name, -ret);
			goto out;
		}
	}

	/* a sentinel */
	zbd_info->zone_info[nr_zones].start = offset;

	f->zbd_info = zbd_info;
	f->zbd_info->zone_size = zone_size;
	f->zbd_info->zone_size_log2 = is_power_of_2(zone_size) ?
		ilog2(zone_size) : 0;
	f->zbd_info->nr_zones = nr_zones;
	zbd_info = NULL;
	ret = 0;

out:
	sfree(zbd_info);
	free(zones);
	pthread_mutexattr_destroy(&attr);
	return ret;
}

/*
 * Allocate zone information and store it into f->zbd_info if zonemode=zbd.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_create_zone_info(struct thread_data *td, struct fio_file *f)
{
	enum zbd_zoned_model zbd_model;
	int ret;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);

	ret = zbd_get_zoned_model(td, f, &zbd_model);
	if (ret)
		return ret;

	switch (zbd_model) {
	case ZBD_IGNORE:
		return 0;
	case ZBD_HOST_AWARE:
	case ZBD_HOST_MANAGED:
		ret = parse_zone_info(td, f);
		break;
	case ZBD_NONE:
		ret = init_zone_info(td, f);
		break;
	default:
		td_verror(td, EINVAL, "Unsupported zoned model");
		log_err("Unsupported zoned model\n");
		return -EINVAL;
	}

	if (ret == 0)
		f->zbd_info->model = zbd_model;
	return ret;
}

void zbd_free_zone_info(struct fio_file *f)
{
	uint32_t refcount;

	assert(f->zbd_info);

	pthread_mutex_lock(&f->zbd_info->mutex);
	refcount = --f->zbd_info->refcount;
	pthread_mutex_unlock(&f->zbd_info->mutex);

	assert((int32_t)refcount >= 0);
	if (refcount == 0)
		sfree(f->zbd_info);
	f->zbd_info = NULL;
}

/*
 * Initialize f->zbd_info.
 *
 * Returns 0 upon success and a negative error code upon failure.
 *
 * Note: this function can only work correctly if it is called before the first
 * fio fork() call.
 */
static int zbd_init_zone_info(struct thread_data *td, struct fio_file *file)
{
	struct thread_data *td2;
	struct fio_file *f2;
	int i, j, ret;

	for_each_td(td2, i) {
		for_each_file(td2, f2, j) {
			if (td2 == td && f2 == file)
				continue;
			if (!f2->zbd_info ||
			    strcmp(f2->file_name, file->file_name) != 0)
				continue;
			file->zbd_info = f2->zbd_info;
			file->zbd_info->refcount++;
			return 0;
		}
	}

	ret = zbd_create_zone_info(td, file);
	if (ret < 0)
		td_verror(td, -ret, "zbd_create_zone_info() failed");
	return ret;
}

int zbd_setup_files(struct thread_data *td)
{
	struct fio_file *f;
	int i;

	for_each_file(td, f, i) {
		if (zbd_init_zone_info(td, f))
			return 1;
	}

	if (!zbd_using_direct_io()) {
		log_err("Using direct I/O is mandatory for writing to ZBD drives\n\n");
		return 1;
	}

	if (!zbd_verify_sizes())
		return 1;

	if (!zbd_verify_bs())
		return 1;

	return 0;
}
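
/*
 * Example job file (sketch) exercising the checks above; /dev/sdX is a
 * hypothetical host-managed device:
 *
 *	[global]
 *	zonemode=zbd
 *	direct=1        ; required for writes, see zbd_using_direct_io()
 *	filename=/dev/sdX
 *
 *	[seqwrite]
 *	rw=write
 *	bs=128k         ; must divide the zone size when verify= is used
 *
 * When targeting a regular file or block device instead, zonesize= must be
 * given explicitly (see init_zone_info()).
 */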

/**
 * zbd_reset_range - reset zones for a range of bytes
 * @td: FIO thread data.
 * @f: Fio file for which to reset zones
 * @offset: Starting offset in bytes of the first zone to reset
 * @length: Length in bytes of the range of zones to reset
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_range(struct thread_data *td, struct fio_file *f,
			   uint64_t offset, uint64_t length)
{
	uint32_t zone_idx_b, zone_idx_e;
	struct fio_zone_info *zb, *ze, *z;
	int ret = 0;

	assert(is_valid_offset(f, offset + length - 1));

	switch (f->zbd_info->model) {
	case ZBD_HOST_AWARE:
	case ZBD_HOST_MANAGED:
		ret = zbd_reset_wp(td, f, offset, length);
		if (ret < 0)
			return ret;
		break;
	default:
		break;
	}

	zone_idx_b = zbd_zone_idx(f, offset);
	zb = &f->zbd_info->zone_info[zone_idx_b];
	zone_idx_e = zbd_zone_idx(f, offset + length);
	ze = &f->zbd_info->zone_info[zone_idx_e];
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		pthread_mutex_lock(&f->zbd_info->mutex);
		f->zbd_info->sectors_with_data -= z->wp - z->start;
		pthread_mutex_unlock(&f->zbd_info->mutex);
		z->wp = z->start;
		z->verify_block = 0;
		pthread_mutex_unlock(&z->mutex);
	}

	td->ts.nr_zone_resets += ze - zb;

	return ret;
}

static unsigned int zbd_zone_nr(struct zoned_block_device_info *zbd_info,
				struct fio_zone_info *zone)
{
	return zone - zbd_info->zone_info;
}

/**
 * zbd_reset_zone - reset the write pointer of a single zone
 * @td: FIO thread data.
 * @f: FIO file associated with the disk for which to reset a write pointer.
 * @z: Zone to reset.
 *
 * Returns 0 upon success and a negative error code upon failure.
 */
static int zbd_reset_zone(struct thread_data *td, struct fio_file *f,
			  struct fio_zone_info *z)
{
	dprint(FD_ZBD, "%s: resetting wp of zone %u.\n", f->file_name,
	       zbd_zone_nr(f->zbd_info, z));

	return zbd_reset_range(td, f, z->start, (z+1)->start - z->start);
}

/* The caller must hold f->zbd_info->mutex */
static void zbd_close_zone(struct thread_data *td, const struct fio_file *f,
			   unsigned int open_zone_idx)
{
	uint32_t zone_idx;

	assert(open_zone_idx < f->zbd_info->num_open_zones);
	zone_idx = f->zbd_info->open_zones[open_zone_idx];
	memmove(f->zbd_info->open_zones + open_zone_idx,
		f->zbd_info->open_zones + open_zone_idx + 1,
		(ZBD_MAX_OPEN_ZONES - (open_zone_idx + 1)) *
		sizeof(f->zbd_info->open_zones[0]));
	f->zbd_info->num_open_zones--;
	f->zbd_info->zone_info[zone_idx].open = 0;
}
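
/*
 * Compaction example for zbd_close_zone() (hypothetical values): with
 * open_zones = {3, 7, 9} and open_zone_idx = 1, the memmove above shifts the
 * tail left so the array becomes {3, 9}, num_open_zones drops to 2, and
 * zone 7 is marked closed.
 */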

/*
 * Reset a range of zones. Returns 0 upon success and 1 upon failure.
 * @td: fio thread data.
 * @f: fio file for which to reset zones
 * @zb: first zone to reset.
 * @ze: first zone not to reset.
 * @all_zones: whether to reset all zones or only those zones for which the
 *	write pointer is not a multiple of td->o.min_bs[DDIR_WRITE].
 */
static int zbd_reset_zones(struct thread_data *td, struct fio_file *f,
			   struct fio_zone_info *const zb,
			   struct fio_zone_info *const ze, bool all_zones)
{
	struct fio_zone_info *z;
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
	bool reset_wp;
	int res = 0;

	assert(min_bs);

	dprint(FD_ZBD, "%s: examining zones %u .. %u\n", f->file_name,
	       zbd_zone_nr(f->zbd_info, zb), zbd_zone_nr(f->zbd_info, ze));
	for (z = zb; z < ze; z++) {
		uint32_t nz = z - f->zbd_info->zone_info;

		if (!zbd_zone_swr(z))
			continue;
		zone_lock(td, z);
		if (all_zones) {
			unsigned int i;

			pthread_mutex_lock(&f->zbd_info->mutex);
			for (i = 0; i < f->zbd_info->num_open_zones; i++) {
				if (f->zbd_info->open_zones[i] == nz)
					zbd_close_zone(td, f, i);
			}
			pthread_mutex_unlock(&f->zbd_info->mutex);

			reset_wp = z->wp != z->start;
		} else {
			reset_wp = z->wp % min_bs != 0;
		}
		if (reset_wp) {
			dprint(FD_ZBD, "%s: resetting zone %u\n",
			       f->file_name,
			       zbd_zone_nr(f->zbd_info, z));
			if (zbd_reset_zone(td, f, z) < 0)
				res = 1;
		}
		pthread_mutex_unlock(&z->mutex);
	}

	return res;
}

/*
 * Reset zbd_info.write_cnt, the counter that counts down towards the next
 * zone reset.
 */
static void zbd_reset_write_cnt(const struct thread_data *td,
				const struct fio_file *f)
{
	assert(0 <= td->o.zrf.u.f && td->o.zrf.u.f <= 1);

	pthread_mutex_lock(&f->zbd_info->mutex);
	f->zbd_info->write_cnt = td->o.zrf.u.f ?
		min(1.0 / td->o.zrf.u.f, 0.0 + UINT_MAX) : UINT_MAX;
	pthread_mutex_unlock(&f->zbd_info->mutex);
}

static bool zbd_dec_and_reset_write_cnt(const struct thread_data *td,
					const struct fio_file *f)
{
	uint32_t write_cnt = 0;

	pthread_mutex_lock(&f->zbd_info->mutex);
	assert(f->zbd_info->write_cnt);
	if (f->zbd_info->write_cnt)
		write_cnt = --f->zbd_info->write_cnt;
	if (write_cnt == 0)
		zbd_reset_write_cnt(td, f);
	pthread_mutex_unlock(&f->zbd_info->mutex);

	return write_cnt == 0;
}
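
/*
 * Worked example (hypothetical option value): with zone_reset_frequency=0.01
 * (td->o.zrf.u.f == 0.01), write_cnt starts at 1.0 / 0.01 == 100, so
 * zbd_dec_and_reset_write_cnt() returns true on every 100th write and the
 * counter is re-armed for the next 100 writes.
 */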

enum swd_action {
	CHECK_SWD,
	SET_SWD,
};

/* Calculate the number of sectors with data (swd) and perform action 'a' */
static uint64_t zbd_process_swd(const struct fio_file *f, enum swd_action a)
{
	struct fio_zone_info *zb, *ze, *z;
	uint64_t swd = 0;

	zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
	ze = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset +
						  f->io_size)];
	for (z = zb; z < ze; z++) {
		pthread_mutex_lock(&z->mutex);
		swd += z->wp - z->start;
	}
	pthread_mutex_lock(&f->zbd_info->mutex);
	switch (a) {
	case CHECK_SWD:
		assert(f->zbd_info->sectors_with_data == swd);
		break;
	case SET_SWD:
		f->zbd_info->sectors_with_data = swd;
		break;
	}
	pthread_mutex_unlock(&f->zbd_info->mutex);
	for (z = zb; z < ze; z++)
		pthread_mutex_unlock(&z->mutex);

	return swd;
}

/*
 * The swd check is useful for debugging but takes too much time to leave
 * it enabled all the time. Hence it is disabled by default.
 */
static const bool enable_check_swd = false;

/* Check whether the value of zbd_info.sectors_with_data is correct. */
static void zbd_check_swd(const struct fio_file *f)
{
	if (!enable_check_swd)
		return;

	zbd_process_swd(f, CHECK_SWD);
}

static void zbd_init_swd(struct fio_file *f)
{
	uint64_t swd;

	if (!enable_check_swd)
		return;

	swd = zbd_process_swd(f, SET_SWD);
	dprint(FD_ZBD, "%s(%s): swd = %" PRIu64 "\n", __func__, f->file_name,
	       swd);
}

void zbd_file_reset(struct thread_data *td, struct fio_file *f)
{
	struct fio_zone_info *zb, *ze;
	uint32_t zone_idx_e;

	if (!f->zbd_info || !td_write(td))
		return;

	zb = &f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];
	zone_idx_e = zbd_zone_idx(f, f->file_offset + f->io_size);
	ze = &f->zbd_info->zone_info[zone_idx_e];
	zbd_init_swd(f);
	/*
	 * If data verification is enabled reset the affected zones before
	 * writing any data to avoid that a zone reset has to be issued while
	 * writing data, which causes data loss.
	 */
	zbd_reset_zones(td, f, zb, ze, td->o.verify != VERIFY_NONE &&
			td->runstate != TD_VERIFYING);
	zbd_reset_write_cnt(td, f);
}

/* The caller must hold f->zbd_info->mutex. */
static bool is_zone_open(const struct thread_data *td, const struct fio_file *f,
			 unsigned int zone_idx)
{
	struct zoned_block_device_info *zbdi = f->zbd_info;
	int i;

	assert(td->o.max_open_zones <= ARRAY_SIZE(zbdi->open_zones));
	assert(zbdi->num_open_zones <= td->o.max_open_zones);

	for (i = 0; i < zbdi->num_open_zones; i++)
		if (zbdi->open_zones[i] == zone_idx)
			return true;

	return false;
}

/*
 * Open a ZBD zone if it was not yet open. Returns true if either the zone was
 * already open or if opening a new zone is allowed. Returns false if the zone
 * was not yet open and opening a new zone would cause the zone limit to be
 * exceeded.
 */
static bool zbd_open_zone(struct thread_data *td, const struct io_u *io_u,
			  uint32_t zone_idx)
{
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z = &f->zbd_info->zone_info[zone_idx];
	bool res = true;

	if (z->cond == ZBD_ZONE_COND_OFFLINE)
		return false;

	/*
	 * Skip full zones with data verification enabled because resetting a
	 * zone causes data loss and hence causes verification to fail.
	 */
	if (td->o.verify != VERIFY_NONE && zbd_zone_full(f, z, min_bs))
		return false;

	/* Zero means no limit */
	if (!td->o.max_open_zones)
		return true;

	pthread_mutex_lock(&f->zbd_info->mutex);
	if (is_zone_open(td, f, zone_idx))
		goto out;
	res = false;
	if (f->zbd_info->num_open_zones >= td->o.max_open_zones)
		goto out;
	dprint(FD_ZBD, "%s: opening zone %d\n", f->file_name, zone_idx);
	f->zbd_info->open_zones[f->zbd_info->num_open_zones++] = zone_idx;
	z->open = 1;
	res = true;

out:
	pthread_mutex_unlock(&f->zbd_info->mutex);
	return res;
}

/* Anything goes as long as it is not a constant. */
static uint32_t pick_random_zone_idx(const struct fio_file *f,
				     const struct io_u *io_u)
{
	return io_u->offset * f->zbd_info->num_open_zones / f->real_file_size;
}
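
/*
 * Example (hypothetical values): for an io_u whose offset sits halfway
 * through the file while 8 zones are open, this picks open-zone index
 * 8 / 2 == 4. The result only needs to vary between io_us; it does not have
 * to be uniformly distributed.
 */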

/*
 * Modify the offset of an I/O unit that does not refer to an open zone such
 * that it refers to an open zone. Close an open zone and open a new zone if
 * necessary. This algorithm can only work correctly if all write pointers are
 * a multiple of the fio block size. The caller must neither hold z->mutex
 * nor f->zbd_info->mutex. Returns with z->mutex held upon success.
 */
static struct fio_zone_info *zbd_convert_to_open_zone(struct thread_data *td,
						      struct io_u *io_u)
{
	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z;
	unsigned int open_zone_idx = -1;
	uint32_t zone_idx, new_zone_idx;
	int i;

	assert(is_valid_offset(f, io_u->offset));

	if (td->o.max_open_zones) {
		/*
		 * This statement accesses f->zbd_info->open_zones[] on purpose
		 * without locking.
		 */
		zone_idx = f->zbd_info->open_zones[pick_random_zone_idx(f, io_u)];
	} else {
		zone_idx = zbd_zone_idx(f, io_u->offset);
	}
	dprint(FD_ZBD, "%s(%s): starting from zone %d (offset %lld, buflen %lld)\n",
	       __func__, f->file_name, zone_idx, io_u->offset, io_u->buflen);

	/*
	 * Since z->mutex is the outer lock and f->zbd_info->mutex the inner
	 * lock it can happen that the state of the zone with index zone_idx
	 * has changed after 'z' has been assigned and before f->zbd_info->mutex
	 * has been obtained. Hence the loop.
	 */
	for (;;) {
		uint32_t tmp_idx;

		z = &f->zbd_info->zone_info[zone_idx];

		zone_lock(td, z);
		pthread_mutex_lock(&f->zbd_info->mutex);
		if (td->o.max_open_zones == 0)
			goto examine_zone;
		if (f->zbd_info->num_open_zones == 0) {
			pthread_mutex_unlock(&f->zbd_info->mutex);
			pthread_mutex_unlock(&z->mutex);
			dprint(FD_ZBD, "%s(%s): no zones are open\n",
			       __func__, f->file_name);
			return NULL;
		}

		/*
		 * List of opened zones is per-device, shared across all threads.
		 * Start with quasi-random candidate zone.
		 * Ignore zones which don't belong to thread's offset/size area.
		 */
		open_zone_idx = pick_random_zone_idx(f, io_u);
		assert(open_zone_idx < f->zbd_info->num_open_zones);
		tmp_idx = open_zone_idx;
		for (i = 0; i < f->zbd_info->num_open_zones; i++) {
			uint32_t tmpz;

			if (tmp_idx >= f->zbd_info->num_open_zones)
				tmp_idx = 0;
			tmpz = f->zbd_info->open_zones[tmp_idx];

			if (is_valid_offset(f, f->zbd_info->zone_info[tmpz].start)) {
				open_zone_idx = tmp_idx;
				goto found_candidate_zone;
			}

			tmp_idx++;
		}

		dprint(FD_ZBD, "%s(%s): no candidate zone\n",
		       __func__, f->file_name);
		pthread_mutex_unlock(&f->zbd_info->mutex);
		pthread_mutex_unlock(&z->mutex);
		return NULL;

found_candidate_zone:
		new_zone_idx = f->zbd_info->open_zones[open_zone_idx];
		if (new_zone_idx == zone_idx)
			break;
		zone_idx = new_zone_idx;
		pthread_mutex_unlock(&f->zbd_info->mutex);
		pthread_mutex_unlock(&z->mutex);
	}

	/* Both z->mutex and f->zbd_info->mutex are held. */

examine_zone:
	if (z->wp + min_bs <= (z+1)->start) {
		pthread_mutex_unlock(&f->zbd_info->mutex);
		goto out;
	}
	dprint(FD_ZBD, "%s(%s): closing zone %d\n", __func__, f->file_name,
	       zone_idx);
	if (td->o.max_open_zones)
		zbd_close_zone(td, f, open_zone_idx);
	pthread_mutex_unlock(&f->zbd_info->mutex);

	/* Only z->mutex is held. */

	/* Zone 'z' is full, so try to open a new zone. */
	for (i = f->io_size / f->zbd_info->zone_size; i > 0; i--) {
		zone_idx++;
		pthread_mutex_unlock(&z->mutex);
		z++;
		if (!is_valid_offset(f, z->start)) {
			/* Wrap-around. */
			zone_idx = zbd_zone_idx(f, f->file_offset);
			z = &f->zbd_info->zone_info[zone_idx];
		}
		assert(is_valid_offset(f, z->start));
		zone_lock(td, z);
		if (z->open)
			continue;
		if (zbd_open_zone(td, io_u, zone_idx))
			goto out;
	}

	/* Only z->mutex is held. */

	/* Check whether the write fits in any of the already opened zones. */
	pthread_mutex_lock(&f->zbd_info->mutex);
	for (i = 0; i < f->zbd_info->num_open_zones; i++) {
		zone_idx = f->zbd_info->open_zones[i];
		pthread_mutex_unlock(&f->zbd_info->mutex);
		pthread_mutex_unlock(&z->mutex);

		z = &f->zbd_info->zone_info[zone_idx];

		zone_lock(td, z);
		if (z->wp + min_bs <= (z+1)->start)
			goto out;
		pthread_mutex_lock(&f->zbd_info->mutex);
	}
	pthread_mutex_unlock(&f->zbd_info->mutex);
	pthread_mutex_unlock(&z->mutex);
	dprint(FD_ZBD, "%s(%s): did not open another zone\n", __func__,
	       f->file_name);
	return NULL;

out:
	dprint(FD_ZBD, "%s(%s): returning zone %d\n", __func__, f->file_name,
	       zone_idx);
	io_u->offset = z->start;
	return z;
}

/* The caller must hold z->mutex. */
static struct fio_zone_info *zbd_replay_write_order(struct thread_data *td,
						    struct io_u *io_u,
						    struct fio_zone_info *z)
{
	const struct fio_file *f = io_u->file;
	const uint32_t min_bs = td->o.min_bs[DDIR_WRITE];

	if (!zbd_open_zone(td, io_u, z - f->zbd_info->zone_info)) {
		pthread_mutex_unlock(&z->mutex);
		z = zbd_convert_to_open_zone(td, io_u);
		assert(z);
	}

	if (z->verify_block * min_bs >= f->zbd_info->zone_size)
		log_err("%s: %d * %d >= %llu\n", f->file_name, z->verify_block,
			min_bs, (unsigned long long) f->zbd_info->zone_size);
	io_u->offset = z->start + z->verify_block++ * min_bs;
	return z;
}

/*
 * Find another zone for which @io_u fits below the write pointer. Start
 * searching in zones @zb + 1 .. @zl and continue searching in zones
 * @zf .. @zb - 1.
 *
 * Either returns NULL or returns a zone pointer and holds the mutex for that
 * zone.
 */
static struct fio_zone_info *
zbd_find_zone(struct thread_data *td, struct io_u *io_u,
	      struct fio_zone_info *zb, struct fio_zone_info *zl)
{
	const uint32_t min_bs = td->o.min_bs[io_u->ddir];
	const struct fio_file *f = io_u->file;
	struct fio_zone_info *z1, *z2;
	const struct fio_zone_info *const zf =
		&f->zbd_info->zone_info[zbd_zone_idx(f, f->file_offset)];

	/*
	 * Skip to the next non-empty zone in case of sequential I/O and to
	 * the nearest non-empty zone in case of random I/O.
	 */
	for (z1 = zb + 1, z2 = zb - 1; z1 < zl || z2 >= zf; z1++, z2--) {
		if (z1 < zl && z1->cond != ZBD_ZONE_COND_OFFLINE) {
			zone_lock(td, z1);
			if (z1->start + min_bs <= z1->wp)
				return z1;
			pthread_mutex_unlock(&z1->mutex);
		} else if (!td_random(td)) {
			break;
		}
		if (td_random(td) && z2 >= zf &&
		    z2->cond != ZBD_ZONE_COND_OFFLINE) {
			zone_lock(td, z2);
			if (z2->start + min_bs <= z2->wp)
				return z2;
			pthread_mutex_unlock(&z2->mutex);
		}
	}
	dprint(FD_ZBD, "%s: adjusting random read offset failed\n",
	       f->file_name);
	return NULL;
}

/**
 * zbd_queue_io - update the write pointer of a sequential zone
 * @io_u: I/O unit
 * @q: queueing status (busy, completed or queued).
 * @success: Whether or not the I/O unit has been queued successfully
 *
 * For write and trim operations, update the write pointer of the I/O unit
 * target zone.
 */
static void zbd_queue_io(struct io_u *io_u, int q, bool success)
{
	const struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbd_info = f->zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx;
	uint64_t zone_end;

	if (!zbd_info)
		return;

	zone_idx = zbd_zone_idx(f, io_u->offset);
	assert(zone_idx < zbd_info->nr_zones);
	z = &zbd_info->zone_info[zone_idx];

	if (!zbd_zone_swr(z))
		return;

	if (!success)
		goto unlock;

	dprint(FD_ZBD,
	       "%s: queued I/O (%lld, %llu) for zone %u\n",
	       f->file_name, io_u->offset, io_u->buflen, zone_idx);

	switch (io_u->ddir) {
	case DDIR_WRITE:
		zone_end = min((uint64_t)(io_u->offset + io_u->buflen),
			       (z + 1)->start);
		pthread_mutex_lock(&zbd_info->mutex);
		/*
		 * z->wp > zone_end means that one or more I/O errors
		 * have occurred.
		 */
		if (z->wp <= zone_end)
			zbd_info->sectors_with_data += zone_end - z->wp;
		pthread_mutex_unlock(&zbd_info->mutex);
		z->wp = zone_end;
		break;
	case DDIR_TRIM:
		assert(z->wp == z->start);
		break;
	default:
		break;
	}

unlock:
	if (!success || q != FIO_Q_QUEUED) {
		/* BUSY or COMPLETED: unlock the zone */
		pthread_mutex_unlock(&z->mutex);
		io_u->zbd_put_io = NULL;
	}
}

/**
 * zbd_put_io - Unlock an I/O unit target zone lock
 * @io_u: I/O unit
 */
static void zbd_put_io(const struct io_u *io_u)
{
	const struct fio_file *f = io_u->file;
	struct zoned_block_device_info *zbd_info = f->zbd_info;
	struct fio_zone_info *z;
	uint32_t zone_idx;
	int ret;

	if (!zbd_info)
		return;

	zone_idx = zbd_zone_idx(f, io_u->offset);
	assert(zone_idx < zbd_info->nr_zones);
	z = &zbd_info->zone_info[zone_idx];

	if (!zbd_zone_swr(z))
		return;

	dprint(FD_ZBD,
	       "%s: terminate I/O (%lld, %llu) for zone %u\n",
	       f->file_name, io_u->offset, io_u->buflen, zone_idx);

	ret = pthread_mutex_unlock(&z->mutex);
	assert(ret == 0);
	zbd_check_swd(f);
}

/*
 * Windows and MacOS do not define this.
 */
#ifndef EREMOTEIO
#define EREMOTEIO	121	/* Linux value */
#endif

bool zbd_unaligned_write(int error_code)
{
	switch (error_code) {
	case EIO:
	case EREMOTEIO:
		return true;
	}
	return false;
}

/**
 * setup_zbd_zone_mode - handle zoneskip as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * For sequential workloads, change the file offset to skip zoneskip bytes when
 * no more IO can be performed in the current zone.
 * - For read workloads, zoneskip is applied when the io has reached the end of
 *   the zone or the zone write position (when td->o.read_beyond_wp is false).
 * - For write workloads, zoneskip is applied when the zone is full.
 * This applies only to read and write operations.
 */
void setup_zbd_zone_mode(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	struct fio_zone_info *z;
	uint32_t zone_idx;

	assert(td->o.zone_mode == ZONE_MODE_ZBD);
	assert(td->o.zone_size);

	/*
	 * zone_skip is valid only for sequential workloads.
	 */
	if (td_random(td) || !td->o.zone_skip)
		return;

	/*
	 * It is time to switch to a new zone if:
	 * - zone_bytes == zone_size bytes have already been accessed
	 * - The last position reached the end of the current zone.
	 * - For reads with td->o.read_beyond_wp == false, the last position
	 *   reached the zone write pointer.
	 */
	zone_idx = zbd_zone_idx(f, f->last_pos[ddir]);
	z = &f->zbd_info->zone_info[zone_idx];

	if (td->zone_bytes >= td->o.zone_size ||
	    f->last_pos[ddir] >= (z+1)->start ||
	    (ddir == DDIR_READ &&
	     (!td->o.read_beyond_wp) && f->last_pos[ddir] >= z->wp)) {
		/*
		 * Skip zones.
		 */
		td->zone_bytes = 0;
		f->file_offset += td->o.zone_size + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = get_start_offset(td, f);

		f->last_pos[ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}
}
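
/*
 * Zoneskip example (hypothetical option values): with zonesize=256m and
 * zoneskip=256m, finishing a zone advances file_offset by zone_size +
 * zone_skip == 512 MiB, so every other zone is touched, wrapping back to the
 * start offset once the end of the file is reached.
 */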

/**
 * zbd_adjust_ddir - Adjust an I/O direction for zonedmode=zbd.
 *
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 * @ddir: I/O direction before adjustment.
 *
 * Return adjusted I/O direction.
 */
enum fio_ddir zbd_adjust_ddir(struct thread_data *td, struct io_u *io_u,
			      enum fio_ddir ddir)
{
	/*
	 * In case read direction is chosen for the first random I/O, fio with
	 * zonemode=zbd stops because no data can be read from zoned block
	 * devices with all empty zones. Overwrite the first I/O direction as
	 * write to make sure data to read exists.
	 */
	if (ddir != DDIR_READ || !td_rw(td))
		return ddir;

	if (io_u->file->zbd_info->sectors_with_data ||
	    td->o.read_beyond_wp)
		return DDIR_READ;

	return DDIR_WRITE;
}

/**
 * zbd_adjust_block - adjust the offset and length as necessary for ZBD drives
 * @td: FIO thread data.
 * @io_u: FIO I/O unit.
 *
 * Locking strategy: returns with z->mutex locked if and only if z refers
 * to a sequential zone and if io_u_accept is returned. z is the zone that
 * corresponds to io_u->offset at the end of this function.
 */
enum io_u_action zbd_adjust_block(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	uint32_t zone_idx_b;
	struct fio_zone_info *zb, *zl, *orig_zb;
	uint32_t orig_len = io_u->buflen;
	uint32_t min_bs = td->o.min_bs[io_u->ddir];
	uint64_t new_len;
	int64_t range;

	if (!f->zbd_info)
		return io_u_accept;

	assert(min_bs);
	assert(is_valid_offset(f, io_u->offset));
	assert(io_u->buflen);
	zone_idx_b = zbd_zone_idx(f, io_u->offset);
	zb = &f->zbd_info->zone_info[zone_idx_b];
	orig_zb = zb;

	/* Accept the I/O offset for conventional zones. */
	if (!zbd_zone_swr(zb))
		return io_u_accept;

	/*
	 * Accept the I/O offset for reads if reading beyond the write pointer
	 * is enabled.
	 */
	if (zb->cond != ZBD_ZONE_COND_OFFLINE &&
	    io_u->ddir == DDIR_READ && td->o.read_beyond_wp)
		return io_u_accept;

	zbd_check_swd(f);

	zone_lock(td, zb);

	switch (io_u->ddir) {
	case DDIR_READ:
		if (td->runstate == TD_VERIFYING) {
			if (td_write(td))
				zb = zbd_replay_write_order(td, io_u, zb);
			goto accept;
		}
		/*
		 * Check that there is enough written data in the zone to do an
		 * I/O of at least min_bs B. If there isn't, find a new zone for
		 * the I/O.
		 */
		range = zb->cond != ZBD_ZONE_COND_OFFLINE ?
			zb->wp - zb->start : 0;
		if (range < min_bs ||
		    ((!td_random(td)) && (io_u->offset + min_bs > zb->wp))) {
			pthread_mutex_unlock(&zb->mutex);
			zl = &f->zbd_info->zone_info[zbd_zone_idx(f,
						f->file_offset + f->io_size)];
			zb = zbd_find_zone(td, io_u, zb, zl);
			if (!zb) {
				dprint(FD_ZBD,
				       "%s: zbd_find_zone(%lld, %llu) failed\n",
				       f->file_name, io_u->offset,
				       io_u->buflen);
				goto eof;
			}
			/*
			 * zbd_find_zone() returned a zone with a range of at
			 * least min_bs.
			 */
			range = zb->wp - zb->start;
			assert(range >= min_bs);

			if (!td_random(td))
				io_u->offset = zb->start;
		}
		/*
		 * Make sure the I/O is within the zone valid data range while
		 * maximizing the I/O size and preserving randomness.
		 */
		if (range <= io_u->buflen)
			io_u->offset = zb->start;
		else if (td_random(td))
			io_u->offset = zb->start +
				((io_u->offset - orig_zb->start) %
				 (range - io_u->buflen)) / min_bs * min_bs;
		/*
		 * Make sure the I/O does not cross over the zone wp position.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      (unsigned long long)(zb->wp - io_u->offset));
		new_len = new_len / min_bs * min_bs;
		if (new_len < io_u->buflen) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u into %llu\n",
			       orig_len, io_u->buflen);
		}
		assert(zb->start <= io_u->offset);
		assert(io_u->offset + io_u->buflen <= zb->wp);
		goto accept;
	case DDIR_WRITE:
		if (io_u->buflen > f->zbd_info->zone_size)
			goto eof;
		if (!zbd_open_zone(td, io_u, zone_idx_b)) {
			pthread_mutex_unlock(&zb->mutex);
			zb = zbd_convert_to_open_zone(td, io_u);
			if (!zb)
				goto eof;
			zone_idx_b = zb - f->zbd_info->zone_info;
		}
		/* Check whether the zone reset threshold has been exceeded */
		if (td->o.zrf.u.f) {
			if (f->zbd_info->sectors_with_data >=
			    f->io_size * td->o.zrt.u.f &&
			    zbd_dec_and_reset_write_cnt(td, f)) {
				zb->reset_zone = 1;
			}
		}
		/* Reset the zone pointer if necessary */
		if (zb->reset_zone || zbd_zone_full(f, zb, min_bs)) {
			assert(td->o.verify == VERIFY_NONE);
			/*
			 * Since previous write requests may have been submitted
			 * asynchronously and since we will submit the zone
			 * reset synchronously, wait until previously submitted
			 * write requests have completed before issuing a
			 * zone reset.
			 */
			io_u_quiesce(td);
			zb->reset_zone = 0;
			if (zbd_reset_zone(td, f, zb) < 0)
				goto eof;
		}
		/* Make writes occur at the write pointer */
		assert(!zbd_zone_full(f, zb, min_bs));
		io_u->offset = zb->wp;
		if (!is_valid_offset(f, io_u->offset)) {
			dprint(FD_ZBD, "Dropped request with offset %llu\n",
			       io_u->offset);
			goto eof;
		}
		/*
		 * Make sure that the buflen is a multiple of the minimal
		 * block size. Give up if shrinking would make the request too
		 * small.
		 */
		new_len = min((unsigned long long)io_u->buflen,
			      (zb + 1)->start - io_u->offset);
		new_len = new_len / min_bs * min_bs;
		if (new_len == io_u->buflen)
			goto accept;
		if (new_len >= min_bs) {
			io_u->buflen = new_len;
			dprint(FD_IO, "Changed length from %u into %llu\n",
			       orig_len, io_u->buflen);
			goto accept;
		}
		log_err("Zone remainder %lld smaller than minimum block size %d\n",
			((zb + 1)->start - io_u->offset),
			min_bs);
		goto eof;
	case DDIR_TRIM:
		/* fall-through */
	case DDIR_SYNC:
	case DDIR_DATASYNC:
	case DDIR_SYNC_FILE_RANGE:
	case DDIR_WAIT:
	case DDIR_LAST:
	case DDIR_INVAL:
		goto accept;
	}

	assert(false);

accept:
	assert(zb);
	assert(zb->cond != ZBD_ZONE_COND_OFFLINE);
	assert(!io_u->zbd_queue_io);
	assert(!io_u->zbd_put_io);
	io_u->zbd_queue_io = zbd_queue_io;
	io_u->zbd_put_io = zbd_put_io;
	return io_u_accept;

eof:
	if (zb)
		pthread_mutex_unlock(&zb->mutex);
	return io_u_eof;
}
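
/*
 * Read remap example for zbd_adjust_block() (hypothetical values): for a
 * random read in a zone with start=0 and wp=8M (so range == 8 MiB), with
 * buflen=1M and min_bs=4k, any incoming offset is folded into [0, 7M) and
 * aligned down to min_bs, which guarantees the read ends at or before the
 * write pointer.
 */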

/* Return a string with ZBD statistics */
char *zbd_write_status(const struct thread_stat *ts)
{
	char *res;

	if (asprintf(&res, "; %llu zone resets", (unsigned long long) ts->nr_zone_resets) < 0)
		return NULL;
	return res;
}