4 * IO engine that reads/writes from MTD character devices.
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

#include "../fio.h"
#include "../verify.h"
#include "../oslib/libmtd.h"
22 struct mtd_dev_info info;
25 static int fio_mtd_maybe_mark_bad(struct thread_data *td,
26 struct fio_mtd_data *fmd,
27 struct io_u *io_u, int eb)
31 ret = mtd_mark_bad(&fmd->info, io_u->file->fd, eb);
34 td_verror(td, errno, "mtd_mark_bad");
41 static int fio_mtd_is_bad(struct thread_data *td,
42 struct fio_mtd_data *fmd,
43 struct io_u *io_u, int eb)
45 int ret = mtd_is_bad(&fmd->info, io_u->file->fd, eb);
48 td_verror(td, errno, "mtd_is_bad");
50 io_u->error = EIO; /* Silent failure--don't flood stderr */
54 static int fio_mtd_queue(struct thread_data *td, struct io_u *io_u)
56 struct fio_file *f = io_u->file;
57 struct fio_mtd_data *fmd = FILE_ENG_DATA(f);
61 fio_ro_check(td, io_u);
64 * Errors tend to pertain to particular erase blocks, so divide up
65 * I/O to erase block size.
66 * If an error is encountered, log it and keep going onto the next
67 * block because the error probably just pertains to that block.
68 * TODO(dehrenberg): Divide up reads and writes into page-sized
69 * operations to get more fine-grained information about errors.
71 while (local_offs < io_u->buflen) {
72 int eb = (io_u->offset + local_offs) / fmd->info.eb_size;
73 int eb_offs = (io_u->offset + local_offs) % fmd->info.eb_size;
74 /* The length is the smaller of the length remaining in the
75 * buffer and the distance to the end of the erase block */
76 int len = min((int)io_u->buflen - local_offs,
77 (int)fmd->info.eb_size - eb_offs);
78 char *buf = ((char *)io_u->buf) + local_offs;
81 ret = fio_mtd_is_bad(td, fmd, io_u, eb);
87 if (io_u->ddir == DDIR_READ) {
88 ret = mtd_read(&fmd->info, f->fd, eb, eb_offs, buf, len);
91 td_verror(td, errno, "mtd_read");
92 if (fio_mtd_maybe_mark_bad(td, fmd, io_u, eb))
95 } else if (io_u->ddir == DDIR_WRITE) {
96 ret = mtd_write(desc, &fmd->info, f->fd, eb,
97 eb_offs, buf, len, NULL, 0, 0);
100 td_verror(td, errno, "mtd_write");
101 if (fio_mtd_maybe_mark_bad(td, fmd, io_u, eb))
104 } else if (io_u->ddir == DDIR_TRIM) {
105 if (eb_offs != 0 || len != fmd->info.eb_size) {
106 io_u->error = EINVAL;
107 td_verror(td, EINVAL,
108 "trim on MTD must be erase block-aligned");
110 ret = mtd_erase(desc, &fmd->info, f->fd, eb);
113 td_verror(td, errno, "mtd_erase");
114 if (fio_mtd_maybe_mark_bad(td, fmd, io_u, eb))
118 io_u->error = ENOTSUP;
119 td_verror(td, io_u->error, "operation not supported on mtd");
126 return FIO_Q_COMPLETED;
129 static int fio_mtd_open_file(struct thread_data *td, struct fio_file *f)
131 struct fio_mtd_data *fmd;
134 ret = generic_open_file(td, f);
138 fmd = calloc(1, sizeof(*fmd));
142 ret = mtd_get_dev_info(desc, f->file_name, &fmd->info);
144 td_verror(td, errno, "mtd_get_dev_info");
148 FILE_SET_ENG_DATA(f, fmd);
155 int fio_unused __ret;
156 __ret = generic_close_file(td, f);
/*
 * Detach and free the per-file engine data allocated in
 * fio_mtd_open_file(), then close the fd. Freeing here fixes the leak
 * of the fio_mtd_data allocation that detaching alone would cause.
 */
static int fio_mtd_close_file(struct thread_data *td, struct fio_file *f)
{
	struct fio_mtd_data *fmd = FILE_ENG_DATA(f);

	FILE_SET_ENG_DATA(f, NULL);
	free(fmd);

	return generic_close_file(td, f);
}
171 static int fio_mtd_get_file_size(struct thread_data *td, struct fio_file *f)
173 struct mtd_dev_info info;
175 int ret = mtd_get_dev_info(desc, f->file_name, &info);
177 td_verror(td, errno, "mtd_get_dev_info");
180 f->real_file_size = info.size;
185 static struct ioengine_ops ioengine = {
187 .version = FIO_IOOPS_VERSION,
188 .queue = fio_mtd_queue,
189 .open_file = fio_mtd_open_file,
190 .close_file = fio_mtd_close_file,
191 .get_file_size = fio_mtd_get_file_size,
192 .flags = FIO_SYNCIO | FIO_NOEXTEND,
195 static void fio_init fio_mtd_register(void)
197 desc = libmtd_open();
198 register_ioengine(&ioengine);
201 static void fio_exit fio_mtd_unregister(void)
203 unregister_ioengine(&ioengine);