/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 *
 * IO engine using libzbc library to talk to SMR disks.
 */
#include <errno.h>

#include <libzbc/zbc.h>

#include "zbd_types.h"
19 struct zbc_device *zdev;
20 enum zbc_dev_model model;
22 uint32_t max_open_seq_req;
25 static int libzbc_get_dev_info(struct libzbc_data *ld, struct fio_file *f)
27 struct zbc_device_info *zinfo;
29 zinfo = calloc(1, sizeof(*zinfo));
33 zbc_get_device_info(ld->zdev, zinfo);
34 ld->model = zinfo->zbd_model;
35 ld->nr_sectors = zinfo->zbd_sectors;
36 ld->max_open_seq_req = zinfo->zbd_max_nr_open_seq_req;
38 dprint(FD_ZBD, "%s: vendor_id:%s, type: %s, model: %s\n",
39 f->file_name, zinfo->zbd_vendor_id,
40 zbc_device_type_str(zinfo->zbd_type),
41 zbc_device_model_str(zinfo->zbd_model));
48 static int libzbc_open_dev(struct thread_data *td, struct fio_file *f,
49 struct libzbc_data **p_ld)
51 struct libzbc_data *ld = td->io_ops_data;
52 int ret, flags = OS_O_DIRECT;
60 if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
61 td_verror(td, EINVAL, "wrong file type");
62 log_err("ioengine libzbc only works on block or character devices\n");
69 } else if (td_read(td)) {
70 if (f->filetype == FIO_TYPE_CHAR && !read_only)
74 } else if (td_trim(td)) {
75 td_verror(td, EINVAL, "libzbc does not support trim");
76 log_err("%s: libzbc does not support trim\n", f->file_name);
81 td_verror(td, EINVAL, "libzbc does not support O_ATOMIC");
82 log_err("%s: libzbc does not support O_ATOMIC\n", f->file_name);
86 ld = calloc(1, sizeof(*ld));
90 ret = zbc_open(f->file_name,
91 flags | ZBC_O_DRV_BLOCK | ZBC_O_DRV_SCSI | ZBC_O_DRV_ATA,
94 log_err("%s: zbc_open() failed, err=%d\n",
99 ret = libzbc_get_dev_info(ld, f);
103 td->io_ops_data = ld;
117 static int libzbc_close_dev(struct thread_data *td)
119 struct libzbc_data *ld = td->io_ops_data;
122 td->io_ops_data = NULL;
125 ret = zbc_close(ld->zdev);
131 static int libzbc_open_file(struct thread_data *td, struct fio_file *f)
133 return libzbc_open_dev(td, f, NULL);
136 static int libzbc_close_file(struct thread_data *td, struct fio_file *f)
140 ret = libzbc_close_dev(td);
142 log_err("%s: close device failed err %d\n",
/* fio cleanup hook: best-effort device close, result intentionally ignored. */
static void libzbc_cleanup(struct thread_data *td)
{
	libzbc_close_dev(td);
}
/*
 * fio invalidate hook. Passthrough IO does not cache data, so there is
 * nothing to invalidate; always succeeds.
 */
static int libzbc_invalidate(struct thread_data *td, struct fio_file *f)
{
	return 0;
}
159 static int libzbc_get_file_size(struct thread_data *td, struct fio_file *f)
161 struct libzbc_data *ld;
164 if (fio_file_size_known(f))
167 ret = libzbc_open_dev(td, f, &ld);
171 f->real_file_size = ld->nr_sectors << 9;
172 fio_file_set_size_known(f);
177 static int libzbc_get_zoned_model(struct thread_data *td, struct fio_file *f,
178 enum zbd_zoned_model *model)
180 struct libzbc_data *ld;
183 if (f->filetype != FIO_TYPE_BLOCK && f->filetype != FIO_TYPE_CHAR) {
188 ret = libzbc_open_dev(td, f, &ld);
193 case ZBC_DM_HOST_AWARE:
194 *model = ZBD_HOST_AWARE;
196 case ZBC_DM_HOST_MANAGED:
197 *model = ZBD_HOST_MANAGED;
207 static int libzbc_report_zones(struct thread_data *td, struct fio_file *f,
208 uint64_t offset, struct zbd_zone *zbdz,
209 unsigned int nr_zones)
211 struct libzbc_data *ld;
212 uint64_t sector = offset >> 9;
213 struct zbc_zone *zones;
217 ret = libzbc_open_dev(td, f, &ld);
221 if (sector >= ld->nr_sectors)
224 zones = calloc(nr_zones, sizeof(struct zbc_zone));
230 ret = zbc_report_zones(ld->zdev, sector, ZBC_RO_ALL, zones, &nr_zones);
232 log_err("%s: zbc_report_zones failed, err=%d\n",
237 for (i = 0; i < nr_zones; i++, zbdz++) {
238 zbdz->start = zones[i].zbz_start << 9;
239 zbdz->len = zones[i].zbz_length << 9;
240 zbdz->wp = zones[i].zbz_write_pointer << 9;
242 * ZBC/ZAC do not define zone capacity, so use the zone size as
245 zbdz->capacity = zbdz->len;
247 switch (zones[i].zbz_type) {
248 case ZBC_ZT_CONVENTIONAL:
249 zbdz->type = ZBD_ZONE_TYPE_CNV;
251 case ZBC_ZT_SEQUENTIAL_REQ:
252 zbdz->type = ZBD_ZONE_TYPE_SWR;
254 case ZBC_ZT_SEQUENTIAL_PREF:
255 zbdz->type = ZBD_ZONE_TYPE_SWP;
258 td_verror(td, errno, "invalid zone type");
259 log_err("%s: invalid type for zone at sector %llu.\n",
260 f->file_name, (unsigned long long)zbdz->start);
265 switch (zones[i].zbz_condition) {
267 zbdz->cond = ZBD_ZONE_COND_NOT_WP;
270 zbdz->cond = ZBD_ZONE_COND_EMPTY;
272 case ZBC_ZC_IMP_OPEN:
273 zbdz->cond = ZBD_ZONE_COND_IMP_OPEN;
275 case ZBC_ZC_EXP_OPEN:
276 zbdz->cond = ZBD_ZONE_COND_EXP_OPEN;
279 zbdz->cond = ZBD_ZONE_COND_CLOSED;
282 zbdz->cond = ZBD_ZONE_COND_FULL;
287 /* Treat all these conditions as offline (don't use!) */
288 zbdz->cond = ZBD_ZONE_COND_OFFLINE;
289 zbdz->wp = zbdz->start;
299 static int libzbc_reset_wp(struct thread_data *td, struct fio_file *f,
300 uint64_t offset, uint64_t length)
302 struct libzbc_data *ld = td->io_ops_data;
303 uint64_t sector = offset >> 9;
304 uint64_t end_sector = (offset + length) >> 9;
305 unsigned int nr_zones;
306 struct zbc_errno err;
312 nr_zones = (length + td->o.zone_size - 1) / td->o.zone_size;
313 if (!sector && end_sector >= ld->nr_sectors) {
314 /* Reset all zones */
315 ret = zbc_reset_zone(ld->zdev, 0, ZBC_OP_ALL_ZONES);
322 for (i = 0; i < nr_zones; i++, sector += td->o.zone_size >> 9) {
323 ret = zbc_reset_zone(ld->zdev, sector, 0);
331 zbc_errno(ld->zdev, &err);
332 td_verror(td, errno, "zbc_reset_zone failed");
334 log_err("%s: reset wp failed %s:%s\n",
336 zbc_sk_str(err.sk), zbc_asc_ascq_str(err.asc_ascq));
340 static int libzbc_get_max_open_zones(struct thread_data *td, struct fio_file *f,
341 unsigned int *max_open_zones)
343 struct libzbc_data *ld;
346 ret = libzbc_open_dev(td, f, &ld);
350 if (ld->max_open_seq_req == ZBC_NO_LIMIT)
353 *max_open_zones = ld->max_open_seq_req;
358 ssize_t libzbc_rw(struct thread_data *td, struct io_u *io_u)
360 struct libzbc_data *ld = td->io_ops_data;
361 struct fio_file *f = io_u->file;
362 uint64_t sector = io_u->offset >> 9;
363 size_t count = io_u->xfer_buflen >> 9;
364 struct zbc_errno err;
367 if (io_u->ddir == DDIR_WRITE)
368 ret = zbc_pwrite(ld->zdev, io_u->xfer_buf, count, sector);
370 ret = zbc_pread(ld->zdev, io_u->xfer_buf, count, sector);
375 log_err("Short %s, len=%zu, ret=%zd\n",
376 io_u->ddir == DDIR_READ ? "read" : "write",
377 count << 9, ret << 9);
382 zbc_errno(ld->zdev, &err);
383 td_verror(td, errno, "libzbc i/o failed");
385 log_err("%s: op %u offset %llu+%llu failed (%s:%s), err %zd\n",
386 f->file_name, io_u->ddir,
387 io_u->offset, io_u->xfer_buflen,
389 zbc_asc_ascq_str(err.asc_ascq), ret);
391 log_err("%s: op %u offset %llu+%llu failed, err %zd\n",
392 f->file_name, io_u->ddir,
393 io_u->offset, io_u->xfer_buflen, ret);
399 static enum fio_q_status libzbc_queue(struct thread_data *td, struct io_u *io_u)
401 struct libzbc_data *ld = td->io_ops_data;
402 struct fio_file *f = io_u->file;
405 fio_ro_check(td, io_u);
407 dprint(FD_ZBD, "%p:%s: libzbc queue %llu\n",
408 td, f->file_name, io_u->offset);
410 if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
411 ret = libzbc_rw(td, io_u);
412 } else if (ddir_sync(io_u->ddir)) {
413 ret = zbc_flush(ld->zdev);
415 log_err("zbc_flush error %zd\n", ret);
416 } else if (io_u->ddir != DDIR_TRIM) {
417 log_err("Unsupported operation %u\n", io_u->ddir);
423 return FIO_Q_COMPLETED;
426 FIO_STATIC struct ioengine_ops ioengine = {
428 .version = FIO_IOOPS_VERSION,
429 .open_file = libzbc_open_file,
430 .close_file = libzbc_close_file,
431 .cleanup = libzbc_cleanup,
432 .invalidate = libzbc_invalidate,
433 .get_file_size = libzbc_get_file_size,
434 .get_zoned_model = libzbc_get_zoned_model,
435 .report_zones = libzbc_report_zones,
436 .reset_wp = libzbc_reset_wp,
437 .get_max_open_zones = libzbc_get_max_open_zones,
438 .queue = libzbc_queue,
439 .flags = FIO_SYNCIO | FIO_NOEXTEND | FIO_RAWIO,
442 static void fio_init fio_libzbc_register(void)
444 register_ioengine(&ioengine);
447 static void fio_exit fio_libzbc_unregister(void)
449 unregister_ioengine(&ioengine);