/*
 * MTD engine
 *
 * IO engine that reads/writes from MTD character devices.
 *
 */
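/*
 * Illustrative job file (device path and workload are examples, not
 * defaults; skip_bad is this engine's only option, defined below):
 *
 *	[mtd-test]
 *	ioengine=mtd
 *	filename=/dev/mtd0
 *	rw=trimwrite
 *	skip_bad=1
 */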
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

#include "../fio.h"
#include "../optgroup.h"
#include "../oslib/libmtd.h"

static libmtd_t desc;

struct fio_mtd_data {
	struct mtd_dev_info info;
};

struct fio_mtd_options {
	void *pad;	/* keep offsetof(skip_bad) nonzero: an off1 of 0 reads as "unset" to fio's option parser */
	unsigned int skip_bad;
};

static struct fio_option options[] = {
	{
		.name	= "skip_bad",
		.lname	= "Skip operations against bad blocks",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct fio_mtd_options, skip_bad),
		.help	= "Skip operations against known bad blocks.",
		.hide	= 1,
		.def	= "0",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_MTD,
	},
	{
		.name	= NULL,
	},
};

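/*
 * If the operation that just failed did so with EIO, treat the erase
 * block as having gone bad and mark it via libmtd so that later passes
 * (with skip_bad=1) can avoid it. Returns -1 only if the marking itself
 * fails.
 */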
static int fio_mtd_maybe_mark_bad(struct thread_data *td,
				  struct fio_mtd_data *fmd,
				  struct io_u *io_u, int eb)
{
	int ret;

	if (errno == EIO) {
		ret = mtd_mark_bad(&fmd->info, io_u->file->fd, eb);
		if (ret != 0) {
			io_u->error = errno;
			td_verror(td, errno, "mtd_mark_bad");
			return -1;
		}
	}
	return 0;
}

static int fio_mtd_is_bad(struct thread_data *td,
			  struct fio_mtd_data *fmd,
			  struct io_u *io_u, int eb)
{
	int ret = mtd_is_bad(&fmd->info, io_u->file->fd, eb);

	if (ret == -1) {
		io_u->error = errno;
		td_verror(td, errno, "mtd_is_bad");
	} else if (ret == 1)
		io_u->error = EIO;	/* silent failure--don't flood stderr */

	return ret;
}

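/*
 * Synchronous queue path: every request is carved up and issued inline,
 * so the handler always returns FIO_Q_COMPLETED (matching the FIO_SYNCIO
 * flag in the engine ops below).
 */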
static enum fio_q_status fio_mtd_queue(struct thread_data *td,
				       struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mtd_data *fmd = FILE_ENG_DATA(f);
	struct fio_mtd_options *o = td->eo;
	int local_offs = 0;
	int ret;

	fio_ro_check(td, io_u);

	/*
	 * Errors tend to pertain to particular erase blocks, so divide the
	 * I/O up by erase block size.
	 * If an error is encountered, log it and keep going on to the next
	 * block, because the error probably pertains to just that block.
	 * TODO(dehrenberg): Divide reads and writes into page-sized
	 * operations to get more fine-grained information about errors.
	 */
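	/*
	 * Worked example (illustrative numbers): with eb_size = 128 KiB, a
	 * 256 KiB request at offset 64 KiB is issued as three chunks of
	 * 64 KiB, 128 KiB and 64 KiB, each confined to one erase block.
	 */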
	while (local_offs < io_u->buflen) {
		int eb = (io_u->offset + local_offs) / fmd->info.eb_size;
		int eb_offs = (io_u->offset + local_offs) % fmd->info.eb_size;
		/*
		 * The length is the smaller of the length remaining in the
		 * buffer and the distance to the end of the erase block.
		 */
		int len = min((int)io_u->buflen - local_offs,
			      (int)fmd->info.eb_size - eb_offs);
		char *buf = ((char *)io_u->buf) + local_offs;

		if (o->skip_bad) {
			ret = fio_mtd_is_bad(td, fmd, io_u, eb);
			if (ret == -1)
				break;
			else if (ret == 1)
				goto next;
		}
		if (io_u->ddir == DDIR_READ) {
			ret = mtd_read(&fmd->info, f->fd, eb, eb_offs, buf, len);
			if (ret != 0) {
				io_u->error = errno;
				td_verror(td, errno, "mtd_read");
				if (fio_mtd_maybe_mark_bad(td, fmd, io_u, eb))
					break;
			}
		} else if (io_u->ddir == DDIR_WRITE) {
			ret = mtd_write(desc, &fmd->info, f->fd, eb,
					eb_offs, buf, len, NULL, 0, 0);
			if (ret != 0) {
				io_u->error = errno;
				td_verror(td, errno, "mtd_write");
				if (fio_mtd_maybe_mark_bad(td, fmd, io_u, eb))
					break;
			}
		} else if (io_u->ddir == DDIR_TRIM) {
			if (eb_offs != 0 || len != (int)fmd->info.eb_size) {
				io_u->error = EINVAL;
				td_verror(td, EINVAL,
					  "trim on MTD must be erase block-aligned");
				/* don't erase a whole block for a partial trim */
				break;
			}
			ret = mtd_erase(desc, &fmd->info, f->fd, eb);
			if (ret != 0) {
				io_u->error = errno;
				td_verror(td, errno, "mtd_erase");
				if (fio_mtd_maybe_mark_bad(td, fmd, io_u, eb))
					break;
			}
		} else {
			io_u->error = ENOTSUP;
			td_verror(td, io_u->error, "operation not supported on mtd");
		}

next:
		local_offs += len;
	}

	return FIO_Q_COMPLETED;
}

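/*
 * Open the character device, then cache the MTD geometry (erase block
 * size, device size) in per-file engine data for use by the queue path.
 */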
static int fio_mtd_open_file(struct thread_data *td, struct fio_file *f)
{
	struct fio_mtd_data *fmd;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return ret;

	fmd = calloc(1, sizeof(*fmd));
	if (!fmd)
		goto err_close;

	ret = mtd_get_dev_info(desc, f->file_name, &fmd->info);
	if (ret != 0) {
		td_verror(td, errno, "mtd_get_dev_info");
		goto err_free;
	}

	FILE_SET_ENG_DATA(f, fmd);
	return 0;

err_free:
	free(fmd);
err_close:
	{
		int fio_unused __ret;
		__ret = generic_close_file(td, f);
		return 1;
	}
}

static int fio_mtd_close_file(struct thread_data *td, struct fio_file *f)
{
	struct fio_mtd_data *fmd = FILE_ENG_DATA(f);

	FILE_SET_ENG_DATA(f, NULL);
	free(fmd);

	return generic_close_file(td, f);
}

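/*
 * The "file size" reported to fio is the full size of the MTD device as
 * returned by libmtd.
 */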
static int fio_mtd_get_file_size(struct thread_data *td, struct fio_file *f)
{
	struct mtd_dev_info info;
	int ret;

	ret = mtd_get_dev_info(desc, f->file_name, &info);
	if (ret != 0) {
		td_verror(td, errno, "mtd_get_dev_info");
		return errno;
	}
	f->real_file_size = info.size;

	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "mtd",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_mtd_queue,
	.open_file	= fio_mtd_open_file,
	.close_file	= fio_mtd_close_file,
	.get_file_size	= fio_mtd_get_file_size,
	.flags		= FIO_SYNCIO | FIO_NOEXTEND,
	.options	= options,
	.option_struct_size = sizeof(struct fio_mtd_options),
};

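/*
 * fio_init/fio_exit expand to constructor/destructor attributes, so the
 * engine registers itself (and opens the shared libmtd handle) when the
 * binary is loaded, and tears both down on exit.
 */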
static void fio_init fio_mtd_register(void)
{
	desc = libmtd_open();
	register_ioengine(&ioengine);
}

static void fio_exit fio_mtd_unregister(void)
{
	unregister_ioengine(&ioengine);
	libmtd_close(desc);
	desc = NULL;
}